Dataset columns:

  code                     string  lengths 81 to 54k
  code_codestyle           int64   values 0 to 721
  style_context            string  lengths 91 to 41.9k
  style_context_codestyle  int64   values 0 to 699
  label                    int64   values 0 to 1
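Each record below is one row in this schema, with fields in column order: code, code_codestyle, style_context, style_context_codestyle, label. As a minimal sketch of how a dump with this schema might be loaded and inspected, assuming the rows are published as a Hugging Face dataset (the repository id "user/code-style-pairs" is a hypothetical placeholder, not the real dataset name):

# Minimal sketch, assuming the rows live in a Hugging Face dataset;
# "user/code-style-pairs" is a hypothetical placeholder repository id.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
# Scalar columns: a style id for each source string plus a binary pair label.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
# String columns hold complete (obfuscated) Python source files.
print(row["code"][:120])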
code:

'''simple docstring'''
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging

A : Tuple = logging.get_logger(__name__)

A : Any = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class _lowercase(lowercase__):
    """simple docstring"""

    A__ = "t5"
    A__ = ["past_key_values"]
    A__ = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self : Any, __lowerCamelCase : Union[str, Any]=32128, __lowerCamelCase : Optional[Any]=512, __lowerCamelCase : Optional[int]=64, __lowerCamelCase : Any=2048, __lowerCamelCase : List[Any]=6, __lowerCamelCase : str=None, __lowerCamelCase : List[Any]=8, __lowerCamelCase : str=32, __lowerCamelCase : Dict=128, __lowerCamelCase : List[Any]=0.1, __lowerCamelCase : Optional[Any]=1E-6, __lowerCamelCase : str=1.0, __lowerCamelCase : Optional[int]="relu", __lowerCamelCase : Tuple=True, __lowerCamelCase : str=True, __lowerCamelCase : int=0, __lowerCamelCase : Optional[Any]=1, **__lowerCamelCase : Union[str, Any]):
        '''simple docstring'''
        lowerCamelCase__ : Union[str, Any] = vocab_size
        lowerCamelCase__ : List[str] = d_model
        lowerCamelCase__ : str = d_kv
        lowerCamelCase__ : Optional[int] = d_ff
        lowerCamelCase__ : Tuple = num_layers
        lowerCamelCase__ : int = num_decoder_layers if num_decoder_layers is not None else self.num_layers  # default = symmetry
        lowerCamelCase__ : int = num_heads
        lowerCamelCase__ : List[Any] = relative_attention_num_buckets
        lowerCamelCase__ : List[str] = relative_attention_max_distance
        lowerCamelCase__ : Optional[Any] = dropout_rate
        lowerCamelCase__ : int = layer_norm_epsilon
        lowerCamelCase__ : int = initializer_factor
        lowerCamelCase__ : Optional[Any] = feed_forward_proj
        lowerCamelCase__ : Dict = use_cache

        lowerCamelCase__ : Any = self.feed_forward_proj.split("-")
        lowerCamelCase__ : Union[str, Any] = act_info[-1]
        lowerCamelCase__ : Dict = act_info[0] == "gated"

        if len(lowercase_) > 1 and act_info[0] != "gated" or len(lowercase_) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            lowerCamelCase__ : Union[str, Any] = "gelu_new"

        super().__init__(pad_token_id=lowercase_, eos_token_id=lowercase_, is_encoder_decoder=lowercase_, **lowercase_)


class _lowercase(lowercase__):
    """simple docstring"""

    @property
    def lowerCAmelCase(self : int):
        '''simple docstring'''
        lowerCamelCase__ : Union[str, Any] = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            lowerCamelCase__ : str = "past_encoder_sequence + sequence"
            lowerCamelCase__ : Any = {0: "batch"}
            lowerCamelCase__ : Union[str, Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            lowerCamelCase__ : Dict = {0: "batch", 1: "decoder_sequence"}
            lowerCamelCase__ : Dict = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(lowercase_, direction="inputs")

        return common_inputs

    @property
    def lowerCAmelCase(self : Union[str, Any]):
        '''simple docstring'''
        return 13
code_codestyle: 715
style_context:

from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging

A : List[Any] = logging.get_logger(__name__)

A : Any = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class _lowercase(lowercase__):
    """simple docstring"""

    A__ = "blenderbot-small"
    A__ = ["past_key_values"]
    A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self : Dict, __lowerCamelCase : List[str]=50265, __lowerCamelCase : str=512, __lowerCamelCase : Tuple=8, __lowerCamelCase : str=2048, __lowerCamelCase : str=16, __lowerCamelCase : List[Any]=8, __lowerCamelCase : Any=2048, __lowerCamelCase : List[str]=16, __lowerCamelCase : Dict=0.0, __lowerCamelCase : List[Any]=0.0, __lowerCamelCase : Optional[int]=True, __lowerCamelCase : Union[str, Any]=True, __lowerCamelCase : Tuple="gelu", __lowerCamelCase : Tuple=512, __lowerCamelCase : Dict=0.1, __lowerCamelCase : int=0.0, __lowerCamelCase : Union[str, Any]=0.0, __lowerCamelCase : Any=0.0_2, __lowerCamelCase : str=1, __lowerCamelCase : Dict=False, __lowerCamelCase : int=0, __lowerCamelCase : Optional[Any]=1, __lowerCamelCase : str=2, __lowerCamelCase : Any=2, **__lowerCamelCase : int):
        '''simple docstring'''
        lowerCamelCase__ : str = vocab_size
        lowerCamelCase__ : Union[str, Any] = max_position_embeddings
        lowerCamelCase__ : Union[str, Any] = d_model
        lowerCamelCase__ : Optional[int] = encoder_ffn_dim
        lowerCamelCase__ : Dict = encoder_layers
        lowerCamelCase__ : Any = encoder_attention_heads
        lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim
        lowerCamelCase__ : str = decoder_layers
        lowerCamelCase__ : Optional[Any] = decoder_attention_heads
        lowerCamelCase__ : List[str] = dropout
        lowerCamelCase__ : List[Any] = attention_dropout
        lowerCamelCase__ : Dict = activation_dropout
        lowerCamelCase__ : Optional[Any] = activation_function
        lowerCamelCase__ : Dict = init_std
        lowerCamelCase__ : List[str] = encoder_layerdrop
        lowerCamelCase__ : Dict = decoder_layerdrop
        lowerCamelCase__ : int = use_cache
        lowerCamelCase__ : List[Any] = encoder_layers
        lowerCamelCase__ : Tuple = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(pad_token_id=__lowerCamelCase, bos_token_id=__lowerCamelCase, eos_token_id=__lowerCamelCase, is_encoder_decoder=__lowerCamelCase, decoder_start_token_id=__lowerCamelCase, forced_eos_token_id=__lowerCamelCase, **__lowerCamelCase)


class _lowercase(lowercase__):
    """simple docstring"""

    @property
    def lowerCAmelCase(self : List[str]):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase__ : int = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                lowerCamelCase__ : Union[str, Any] = {0: "batch"}
                lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"}
                lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(__lowerCamelCase, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            lowerCamelCase__ : Tuple = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                lowerCamelCase__, lowerCamelCase__ : Tuple = self.num_layers
                for i in range(__lowerCamelCase):
                    lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
                    lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            lowerCamelCase__ : Any = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def lowerCAmelCase(self : Optional[int]):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase__ : Union[str, Any] = super().outputs
        else:
            lowerCamelCase__ : int = super(__lowerCamelCase, self).outputs
            if self.use_past:
                lowerCamelCase__, lowerCamelCase__ : Tuple = self.num_layers
                for i in range(__lowerCamelCase):
                    lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"}
                    lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def lowerCAmelCase(self : int, __lowerCamelCase : PreTrainedTokenizer, __lowerCamelCase : int = -1, __lowerCamelCase : int = -1, __lowerCamelCase : bool = False, __lowerCamelCase : Optional[TensorType] = None):
        '''simple docstring'''
        lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase)
        # Generate decoder inputs
        lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1
        lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase)
        lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase, **__lowerCamelCase)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                lowerCamelCase__, lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape
                lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1]
                lowerCamelCase__, lowerCamelCase__ : Optional[Any] = self.num_attention_heads
                lowerCamelCase__ : str = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                lowerCamelCase__ : Optional[int] = decoder_seq_length + 3
                lowerCamelCase__ : Dict = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )
                lowerCamelCase__ : List[Any] = torch.cat([common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase, __lowerCamelCase)], dim=1)
                lowerCamelCase__ : Optional[Any] = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                lowerCamelCase__, lowerCamelCase__ : str = self.num_layers
                lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase, __lowerCamelCase)
                lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase, __lowerCamelCase) - min_num_layers
                lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

                for _ in range(__lowerCamelCase):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(__lowerCamelCase),
                            torch.zeros(__lowerCamelCase),
                            torch.zeros(__lowerCamelCase),
                            torch.zeros(__lowerCamelCase),
                        )
                    )
                # TODO: test this.
                lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
                for _ in range(__lowerCamelCase, __lowerCamelCase):
                    common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase), torch.zeros(__lowerCamelCase)))
        return common_inputs

    def lowerCAmelCase(self : Tuple, __lowerCamelCase : PreTrainedTokenizer, __lowerCamelCase : int = -1, __lowerCamelCase : int = -1, __lowerCamelCase : bool = False, __lowerCamelCase : Optional[TensorType] = None):
        '''simple docstring'''
        lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                lowerCamelCase__, lowerCamelCase__ : int = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                lowerCamelCase__ : str = seqlen + 2
                lowerCamelCase__, lowerCamelCase__ : Optional[int] = self.num_layers
                lowerCamelCase__, lowerCamelCase__ : int = self.num_attention_heads
                lowerCamelCase__ : Tuple = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype
                lowerCamelCase__ : List[str] = torch.cat([common_inputs["attention_mask"], torch.ones(__lowerCamelCase, __lowerCamelCase, dtype=__lowerCamelCase)], dim=1)
                lowerCamelCase__ : Tuple = [(torch.zeros(__lowerCamelCase), torch.zeros(__lowerCamelCase)) for _ in range(__lowerCamelCase)]
        return common_inputs

    def lowerCAmelCase(self : Union[str, Any], __lowerCamelCase : PreTrainedTokenizer, __lowerCamelCase : int = -1, __lowerCamelCase : int = -1, __lowerCamelCase : bool = False, __lowerCamelCase : Optional[TensorType] = None):
        '''simple docstring'''
        lowerCamelCase__ : str = compute_effective_axis_dimension(__lowerCamelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase)
        lowerCamelCase__ : Dict = compute_effective_axis_dimension(__lowerCamelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=__lowerCamelCase)
        # Generate dummy inputs according to compute batch and sequence
        lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase, return_tensors=__lowerCamelCase))
        return common_inputs

    def lowerCAmelCase(self : Any, __lowerCamelCase : PreTrainedTokenizer, __lowerCamelCase : int = -1, __lowerCamelCase : int = -1, __lowerCamelCase : bool = False, __lowerCamelCase : Optional[TensorType] = None):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(__lowerCamelCase, batch_size=__lowerCamelCase, seq_length=__lowerCamelCase, is_pair=__lowerCamelCase, framework=__lowerCamelCase)
        elif self.task == "causal-lm":
            lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm(__lowerCamelCase, batch_size=__lowerCamelCase, seq_length=__lowerCamelCase, is_pair=__lowerCamelCase, framework=__lowerCamelCase)
        else:
            lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(__lowerCamelCase, batch_size=__lowerCamelCase, seq_length=__lowerCamelCase, is_pair=__lowerCamelCase, framework=__lowerCamelCase)
        return common_inputs

    def lowerCAmelCase(self : Any, __lowerCamelCase : Union[str, Any], __lowerCamelCase : int, __lowerCamelCase : Optional[Any], __lowerCamelCase : str):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase)
        else:
            lowerCamelCase__ : int = super(__lowerCamelCase, self)._flatten_past_key_values_(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase)
style_context_codestyle: 5
label: 0
code:

from typing import TYPE_CHECKING

from ...utils import (OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available)

A : Tuple = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A : Optional[int] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A : Tuple = ["ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaForCausalLM", "RobertaForMaskedLM", "RobertaForMultipleChoice", "RobertaForQuestionAnswering", "RobertaForSequenceClassification", "RobertaForTokenClassification", "RobertaModel", "RobertaPreTrainedModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A : int = ["TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaForCausalLM", "TFRobertaForMaskedLM", "TFRobertaForMultipleChoice", "TFRobertaForQuestionAnswering", "TFRobertaForSequenceClassification", "TFRobertaForTokenClassification", "TFRobertaMainLayer", "TFRobertaModel", "TFRobertaPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A : str = ["FlaxRobertaForCausalLM", "FlaxRobertaForMaskedLM", "FlaxRobertaForMultipleChoice", "FlaxRobertaForQuestionAnswering", "FlaxRobertaForSequenceClassification", "FlaxRobertaForTokenClassification", "FlaxRobertaModel", "FlaxRobertaPreTrainedModel"]

if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel)

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel)

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel)

else:
    import sys

    A : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 716
style_context:

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

A : int = logging.get_logger(__name__)

A : Optional[int] = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class _lowercase(lowercase__):
    """simple docstring"""

    A__ = "xmod"

    def __init__(self : int, __lowerCamelCase : Any=30522, __lowerCamelCase : Any=768, __lowerCamelCase : str=12, __lowerCamelCase : Any=12, __lowerCamelCase : List[str]=3072, __lowerCamelCase : List[Any]="gelu", __lowerCamelCase : Union[str, Any]=0.1, __lowerCamelCase : int=0.1, __lowerCamelCase : Tuple=512, __lowerCamelCase : str=2, __lowerCamelCase : List[str]=0.0_2, __lowerCamelCase : List[str]=1E-1_2, __lowerCamelCase : str=1, __lowerCamelCase : Optional[int]=0, __lowerCamelCase : Optional[Any]=2, __lowerCamelCase : str="absolute", __lowerCamelCase : List[str]=True, __lowerCamelCase : Dict=None, __lowerCamelCase : Optional[Any]=False, __lowerCamelCase : Optional[Any]=2, __lowerCamelCase : Tuple=False, __lowerCamelCase : Tuple=True, __lowerCamelCase : Union[str, Any]=True, __lowerCamelCase : str=("en_XX",), __lowerCamelCase : Union[str, Any]=None, **__lowerCamelCase : Optional[int]):
        '''simple docstring'''
        super().__init__(pad_token_id=__lowerCamelCase, bos_token_id=__lowerCamelCase, eos_token_id=__lowerCamelCase, **__lowerCamelCase)
        lowerCamelCase__ : Union[str, Any] = vocab_size
        lowerCamelCase__ : Union[str, Any] = hidden_size
        lowerCamelCase__ : Optional[int] = num_hidden_layers
        lowerCamelCase__ : List[Any] = num_attention_heads
        lowerCamelCase__ : Union[str, Any] = hidden_act
        lowerCamelCase__ : Optional[int] = intermediate_size
        lowerCamelCase__ : Optional[int] = hidden_dropout_prob
        lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
        lowerCamelCase__ : Any = max_position_embeddings
        lowerCamelCase__ : List[Any] = type_vocab_size
        lowerCamelCase__ : int = initializer_range
        lowerCamelCase__ : Tuple = layer_norm_eps
        lowerCamelCase__ : Union[str, Any] = position_embedding_type
        lowerCamelCase__ : str = use_cache
        lowerCamelCase__ : Union[str, Any] = classifier_dropout
        lowerCamelCase__ : Any = pre_norm
        lowerCamelCase__ : Tuple = adapter_reduction_factor
        lowerCamelCase__ : Tuple = adapter_layer_norm
        lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm
        lowerCamelCase__ : Dict = ln_before_adapter
        lowerCamelCase__ : List[Any] = list(__lowerCamelCase)
        lowerCamelCase__ : Optional[Any] = default_language


class _lowercase(lowercase__):
    """simple docstring"""

    @property
    def lowerCAmelCase(self : Tuple):
        '''simple docstring'''
        if self.task == "multiple-choice":
            lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
style_context_codestyle: 5
label: 0
code:

import tempfile
import unittest

from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel


class _lowercase:
    """simple docstring"""

    def __init__(self : Union[str, Any], __lowerCamelCase : List[str], __lowerCamelCase : Union[str, Any]=99, __lowerCamelCase : Dict=13, __lowerCamelCase : int=7, __lowerCamelCase : Any=9, __lowerCamelCase : Dict=True, __lowerCamelCase : int=True, __lowerCamelCase : Tuple=False, __lowerCamelCase : Tuple=32, __lowerCamelCase : Dict=5, __lowerCamelCase : Optional[int]=4, __lowerCamelCase : List[Any]=37, __lowerCamelCase : Union[str, Any]=8, __lowerCamelCase : Optional[Any]=0.1, __lowerCamelCase : str=0.0_0_2, __lowerCamelCase : str=1, __lowerCamelCase : Any=0, __lowerCamelCase : Union[str, Any]=0, __lowerCamelCase : Dict=None, __lowerCamelCase : Optional[Any]=None):
        '''simple docstring'''
        lowerCamelCase__ : Union[str, Any] = parent
        lowerCamelCase__ : Union[str, Any] = batch_size
        lowerCamelCase__ : Any = encoder_seq_length
        lowerCamelCase__ : str = decoder_seq_length
        # For common tests
        lowerCamelCase__ : Optional[int] = self.decoder_seq_length
        lowerCamelCase__ : Optional[Any] = is_training
        lowerCamelCase__ : List[Any] = use_attention_mask
        lowerCamelCase__ : Union[str, Any] = use_labels
        lowerCamelCase__ : Any = vocab_size
        lowerCamelCase__ : Optional[int] = hidden_size
        lowerCamelCase__ : List[str] = num_hidden_layers
        lowerCamelCase__ : Union[str, Any] = num_attention_heads
        lowerCamelCase__ : Any = d_ff
        lowerCamelCase__ : Any = relative_attention_num_buckets
        lowerCamelCase__ : Optional[Any] = dropout_rate
        lowerCamelCase__ : int = initializer_factor
        lowerCamelCase__ : Optional[Any] = eos_token_id
        lowerCamelCase__ : Dict = pad_token_id
        lowerCamelCase__ : Optional[Any] = decoder_start_token_id
        lowerCamelCase__ : Union[str, Any] = None
        lowerCamelCase__ : List[str] = decoder_layers

    def lowerCAmelCase(self : Union[str, Any]):
        '''simple docstring'''
        return TaConfig.from_pretrained("google/umt5-base")

    def lowerCAmelCase(self : Union[str, Any], __lowerCamelCase : List[Any], __lowerCamelCase : Any, __lowerCamelCase : Any, __lowerCamelCase : Union[str, Any]=None, __lowerCamelCase : Dict=None, __lowerCamelCase : Union[str, Any]=None, __lowerCamelCase : Optional[int]=None, __lowerCamelCase : List[Any]=None):
        '''simple docstring'''
        if attention_mask is None:
            lowerCamelCase__ : Union[str, Any] = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            lowerCamelCase__ : Any = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            lowerCamelCase__ : List[Any] = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=UpperCAmelCase__)
        if decoder_head_mask is None:
            lowerCamelCase__ : Tuple = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=UpperCAmelCase__)
        if cross_attn_head_mask is None:
            lowerCamelCase__ : Union[str, Any] = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=UpperCAmelCase__)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def lowerCAmelCase(self : int):
        '''simple docstring'''
        lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        lowerCamelCase__ : List[str] = input_ids.clamp(self.pad_token_id + 1)
        lowerCamelCase__ : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1)
        lowerCamelCase__ : str = self.get_config()
        lowerCamelCase__ : Tuple = config.num_attention_heads
        lowerCamelCase__ : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__)
        return config, input_dict

    def lowerCAmelCase(self : Dict):
        '''simple docstring'''
        lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowerCAmelCase(self : Dict):
        '''simple docstring'''
        return TaConfig(vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)

    def lowerCAmelCase(self : Tuple):
        '''simple docstring'''
        return TaConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)

    def lowerCAmelCase(self : int, __lowerCamelCase : Dict, __lowerCamelCase : Optional[int], __lowerCamelCase : List[Any], __lowerCamelCase : Dict, __lowerCamelCase : List[Any], __lowerCamelCase : Tuple):
        '''simple docstring'''
        lowerCamelCase__ : str = UMTaModel(config=UpperCAmelCase__)
        model.to(UpperCAmelCase__)
        model.eval()
        lowerCamelCase__ : str = model(input_ids=UpperCAmelCase__, decoder_input_ids=UpperCAmelCase__, attention_mask=UpperCAmelCase__, decoder_attention_mask=UpperCAmelCase__)
        lowerCamelCase__ : int = model(input_ids=UpperCAmelCase__, decoder_input_ids=UpperCAmelCase__)
        lowerCamelCase__ : int = result.last_hidden_state
        lowerCamelCase__ : Dict = result.past_key_values
        lowerCamelCase__ : Dict = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(UpperCAmelCase__), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def lowerCAmelCase(self : int, __lowerCamelCase : Optional[int], __lowerCamelCase : str, __lowerCamelCase : Any, __lowerCamelCase : int, __lowerCamelCase : Tuple, __lowerCamelCase : List[str]):
        '''simple docstring'''
        lowerCamelCase__ : int = UMTaModel(config=UpperCAmelCase__).get_decoder().to(UpperCAmelCase__).eval()
        # first forward pass
        lowerCamelCase__ : List[Any] = model(UpperCAmelCase__, use_cache=UpperCAmelCase__)
        lowerCamelCase__ : List[Any] = model(UpperCAmelCase__)
        lowerCamelCase__ : Any = model(UpperCAmelCase__, use_cache=UpperCAmelCase__)
        self.parent.assertTrue(len(UpperCAmelCase__) == len(UpperCAmelCase__))
        self.parent.assertTrue(len(UpperCAmelCase__) == len(UpperCAmelCase__) + 1)
        lowerCamelCase__ : List[str] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase__ : Any = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        lowerCamelCase__ : Union[str, Any] = torch.cat([input_ids, next_tokens], dim=-1)
        lowerCamelCase__ : Any = model(UpperCAmelCase__)['''last_hidden_state''']
        lowerCamelCase__ : Optional[Any] = model(UpperCAmelCase__, past_key_values=UpperCAmelCase__)['''last_hidden_state''']
        # select random slice
        lowerCamelCase__ : Tuple = ids_tensor((1,), output_from_past.shape[-1]).item()
        lowerCamelCase__ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
        lowerCamelCase__ : Tuple = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase__, UpperCAmelCase__, atol=1E-3))

    def lowerCAmelCase(self : Union[str, Any], __lowerCamelCase : Union[str, Any], __lowerCamelCase : int):
        '''simple docstring'''
        lowerCamelCase__ : int = UMTaModel(config=UpperCAmelCase__).to(UpperCAmelCase__).half().eval()
        lowerCamelCase__ : str = model(**UpperCAmelCase__)['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(UpperCAmelCase__).any().item())


@require_torch
class _lowercase(lowercase__, lowercase__, lowercase__, unittest.TestCase):
    """simple docstring"""

    A__ = (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    A__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    A__ = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    A__ = True
    A__ = False
    A__ = False
    A__ = True
    A__ = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    A__ = [0.8, 0.9]

    def lowerCAmelCase(self : Optional[Any]):
        '''simple docstring'''
        lowerCamelCase__ : Union[str, Any] = UMTaModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def lowerCAmelCase(self : Optional[int]):
        '''simple docstring'''
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[Any] = UMTaModel(config_and_inputs[0]).to(UpperCAmelCase__)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(UpperCAmelCase__, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=UpperCAmelCase__, opset_version=9, input_names=["input_ids", "decoder_input_ids"])

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def lowerCAmelCase(self : List[Any]):
        '''simple docstring'''
        lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__)

    def lowerCAmelCase(self : Tuple):
        '''simple docstring'''
        lowerCamelCase__ : Optional[int] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : int = config_and_inputs[0]
        lowerCamelCase__ : Union[str, Any] = UMTaForConditionalGeneration(UpperCAmelCase__).eval()
        model.to(UpperCAmelCase__)
        lowerCamelCase__ : str = {
            '''head_mask''': torch.zeros(config.num_layers, config.num_heads, device=UpperCAmelCase__),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers, config.num_heads, device=UpperCAmelCase__),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers, config.num_heads, device=UpperCAmelCase__),
        }
        for attn_name, (name, mask) in zip(UpperCAmelCase__, head_masking.items()):
            lowerCamelCase__ : int = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                lowerCamelCase__ : List[str] = torch.ones(config.num_decoder_layers, config.num_heads, device=UpperCAmelCase__)
            lowerCamelCase__ : Union[str, Any] = model.generate(config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=UpperCAmelCase__, return_dict_in_generate=UpperCAmelCase__, **UpperCAmelCase__)
            # We check the state of decoder_attentions and cross_attentions just from the last step
            lowerCamelCase__ : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def lowerCAmelCase(self : Any):
        '''simple docstring'''
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase(unittest.TestCase):
    """simple docstring"""

    @slow
    @unittest.skip("Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged")
    def lowerCAmelCase(self : int):
        '''simple docstring'''
        lowerCamelCase__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=UpperCAmelCase__).to(UpperCAmelCase__)
        lowerCamelCase__ : int = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=UpperCAmelCase__, legacy=UpperCAmelCase__)
        lowerCamelCase__ : List[str] = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        lowerCamelCase__ : Dict = tokenizer(UpperCAmelCase__, return_tensors="pt", padding=UpperCAmelCase__).input_ids
        # fmt: off
        lowerCamelCase__ : Optional[Any] = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(UpperCAmelCase__, UpperCAmelCase__)
        lowerCamelCase__ : List[Any] = model.generate(input_ids.to(UpperCAmelCase__))
        lowerCamelCase__ : int = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
            '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        lowerCamelCase__ : Tuple = tokenizer.batch_decode(UpperCAmelCase__)
        self.assertEqual(UpperCAmelCase__, UpperCAmelCase__)
code_codestyle: 717
style_context:

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class _lowercase:
    """simple docstring"""

    def __init__(self : Dict, __lowerCamelCase : str, __lowerCamelCase : Optional[int]=13, __lowerCamelCase : List[str]=7, __lowerCamelCase : Tuple=True, __lowerCamelCase : Optional[int]=True, __lowerCamelCase : List[str]=True, __lowerCamelCase : Union[str, Any]=99, __lowerCamelCase : List[Any]=32, __lowerCamelCase : List[Any]=5, __lowerCamelCase : Optional[Any]=4, __lowerCamelCase : Optional[int]=37, __lowerCamelCase : List[str]="gelu", __lowerCamelCase : List[str]=0.1, __lowerCamelCase : int=0.1, __lowerCamelCase : List[str]=512, __lowerCamelCase : Optional[Any]=16, __lowerCamelCase : Optional[Any]=2, __lowerCamelCase : str=0.0_2, __lowerCamelCase : List[str]=3, __lowerCamelCase : Tuple=4, __lowerCamelCase : Optional[int]=None):
        '''simple docstring'''
        lowerCamelCase__ : Tuple = parent
        lowerCamelCase__ : int = batch_size
        lowerCamelCase__ : List[Any] = seq_length
        lowerCamelCase__ : Union[str, Any] = is_training
        lowerCamelCase__ : Any = use_token_type_ids
        lowerCamelCase__ : Union[str, Any] = use_labels
        lowerCamelCase__ : List[str] = vocab_size
        lowerCamelCase__ : Union[str, Any] = hidden_size
        lowerCamelCase__ : List[Any] = num_hidden_layers
        lowerCamelCase__ : Optional[Any] = num_attention_heads
        lowerCamelCase__ : Any = intermediate_size
        lowerCamelCase__ : str = hidden_act
        lowerCamelCase__ : str = hidden_dropout_prob
        lowerCamelCase__ : Any = attention_probs_dropout_prob
        lowerCamelCase__ : List[str] = max_position_embeddings
        lowerCamelCase__ : Optional[int] = type_vocab_size
        lowerCamelCase__ : List[Any] = type_sequence_label_size
        lowerCamelCase__ : List[str] = initializer_range
        lowerCamelCase__ : List[str] = num_labels
        lowerCamelCase__ : List[Any] = num_choices
        lowerCamelCase__ : Optional[Any] = scope
        lowerCamelCase__ : List[Any] = self.vocab_size - 1

    def lowerCAmelCase(self : List[Any]):
        '''simple docstring'''
        lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lowerCamelCase__ : Optional[Any] = None
        if self.use_token_type_ids:
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        lowerCamelCase__ : Any = None
        lowerCamelCase__ : str = None
        lowerCamelCase__ : str = None
        if self.use_labels:
            lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size)
            lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.num_choices)
        lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig(vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id)
        lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels)

    def lowerCAmelCase(self : str, __lowerCamelCase : int, __lowerCamelCase : int, __lowerCamelCase : Optional[int], __lowerCamelCase : int, *__lowerCamelCase : List[Any]):
        '''simple docstring'''
        lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase)
        model.to(__lowerCamelCase)
        model.eval()
        lowerCamelCase__ : Tuple = model(__lowerCamelCase, token_type_ids=__lowerCamelCase, head_mask=__lowerCamelCase)
        lowerCamelCase__ : str = model(__lowerCamelCase, token_type_ids=__lowerCamelCase)
        lowerCamelCase__ : Optional[int] = model(__lowerCamelCase)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def lowerCAmelCase(self : str, __lowerCamelCase : List[Any], __lowerCamelCase : Union[str, Any], __lowerCamelCase : List[str], __lowerCamelCase : Any, *__lowerCamelCase : Optional[int]):
        '''simple docstring'''
        lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase)
        model.to(__lowerCamelCase)
        model.eval()
        lowerCamelCase__ : List[str] = model(__lowerCamelCase, token_type_ids=__lowerCamelCase, labels=__lowerCamelCase)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def lowerCAmelCase(self : Dict, __lowerCamelCase : Any, __lowerCamelCase : Union[str, Any], __lowerCamelCase : Optional[int], __lowerCamelCase : Optional[int], *__lowerCamelCase : Tuple):
        '''simple docstring'''
        lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase)
        model.to(__lowerCamelCase)
        model.eval()
        lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase, token_type_ids=__lowerCamelCase, labels=__lowerCamelCase)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def lowerCAmelCase(self : Tuple, __lowerCamelCase : Union[str, Any], __lowerCamelCase : Union[str, Any], __lowerCamelCase : Optional[Any], __lowerCamelCase : List[Any], *__lowerCamelCase : Optional[int]):
        '''simple docstring'''
        lowerCamelCase__ : Dict = self.num_labels
        lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase)
        model.to(__lowerCamelCase)
        model.eval()
        lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size)
        lowerCamelCase__ : List[str] = model(__lowerCamelCase, token_type_ids=__lowerCamelCase, labels=__lowerCamelCase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def lowerCAmelCase(self : List[str]):
        '''simple docstring'''
        lowerCamelCase__ : str = self.prepare_config_and_inputs()
        ((lowerCamelCase__), (lowerCamelCase__), (lowerCamelCase__), (lowerCamelCase__), (lowerCamelCase__), (lowerCamelCase__), (lowerCamelCase__)) : Any = config_and_inputs
        lowerCamelCase__ : Union[str, Any] = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_torch
class _lowercase(lowercase__, lowercase__, lowercase__, unittest.TestCase):
    """simple docstring"""

    A__ = (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else ()
    A__ = (OpenAIGPTLMHeadModel,) if is_torch_available() else ()  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    A__ = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def lowerCAmelCase(self : List[str], __lowerCamelCase : str, __lowerCamelCase : Tuple, __lowerCamelCase : Any, __lowerCamelCase : List[Any], __lowerCamelCase : Union[str, Any]):
        '''simple docstring'''
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def lowerCAmelCase(self : Union[str, Any], __lowerCamelCase : Optional[int], __lowerCamelCase : Tuple, __lowerCamelCase : Tuple=False):
        '''simple docstring'''
        lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase, __lowerCamelCase, return_labels=__lowerCamelCase)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                lowerCamelCase__ : Optional[Any] = torch.zeros((self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=__lowerCamelCase)
                lowerCamelCase__ : Tuple = inputs_dict["labels"]
                lowerCamelCase__ : Any = inputs_dict["labels"]
                lowerCamelCase__ : Any = torch.zeros((self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=__lowerCamelCase)
                lowerCamelCase__ : Union[str, Any] = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=__lowerCamelCase)
        return inputs_dict

    def lowerCAmelCase(self : List[Any]):
        '''simple docstring'''
        lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self)
        lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=__lowerCamelCase, n_embd=37)

    def lowerCAmelCase(self : int):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def lowerCAmelCase(self : Dict):
        '''simple docstring'''
        lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase)

    def lowerCAmelCase(self : str):
        '''simple docstring'''
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase)

    def lowerCAmelCase(self : Dict):
        '''simple docstring'''
        lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase)

    def lowerCAmelCase(self : Optional[int]):
        '''simple docstring'''
        lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase)

    @slow
    def lowerCAmelCase(self : List[str]):
        '''simple docstring'''
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase)
            self.assertIsNotNone(__lowerCamelCase)


@require_torch
class _lowercase(unittest.TestCase):
    """simple docstring"""

    @slow
    def lowerCAmelCase(self : Any):
        '''simple docstring'''
        lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(__lowerCamelCase)
        lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=__lowerCamelCase)  # the president is
        lowerCamelCase__ : Union[str, Any] = [481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481]  # the president is a very good man. " \n " i'm sure he is, " said the
        lowerCamelCase__ : int = model.generate(__lowerCamelCase, do_sample=__lowerCamelCase)
        self.assertListEqual(output_ids[0].tolist(), __lowerCamelCase)
style_context_codestyle: 5
label: 0
code:

import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin

if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

A : int = random.Random()


def UpperCamelCase__(_A : List[str], _A : List[str]=1.0, _A : Union[str, Any]=None, _A : Dict=None):
    """simple docstring"""
    if rng is None:
        lowerCamelCase__ : int = global_rng
    lowerCamelCase__ : List[str] = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


class _lowercase(unittest.TestCase):
    """simple docstring"""

    def __init__(self : str, __lowerCamelCase : Any, __lowerCamelCase : List[Any]=7, __lowerCamelCase : Any=400, __lowerCamelCase : str=2000, __lowerCamelCase : List[Any]=2048, __lowerCamelCase : int=128, __lowerCamelCase : Optional[Any]=1, __lowerCamelCase : List[str]=512, __lowerCamelCase : str=30, __lowerCamelCase : Optional[int]=44100):
        '''simple docstring'''
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Tuple = batch_size
        lowerCamelCase__ : int = min_seq_length
        lowerCamelCase__ : Union[str, Any] = max_seq_length
        lowerCamelCase__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        lowerCamelCase__ : str = spectrogram_length
        lowerCamelCase__ : Optional[Any] = feature_size
        lowerCamelCase__ : Any = num_audio_channels
        lowerCamelCase__ : Tuple = hop_length
        lowerCamelCase__ : Optional[Any] = chunk_length
        lowerCamelCase__ : Union[str, Any] = sampling_rate

    def lowerCAmelCase(self : Union[str, Any]):
        '''simple docstring'''
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def lowerCAmelCase(self : List[str], __lowerCamelCase : Tuple=False, __lowerCamelCase : Optional[int]=False):
        '''simple docstring'''
        def _flatten(__lowerCamelCase : Optional[int]):
            return list(itertools.chain(*snake_case__))

        if equal_length:
            lowerCamelCase__ : Tuple = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            lowerCamelCase__ : str = [floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)]
        if numpify:
            lowerCamelCase__ : List[str] = [np.asarray(snake_case__) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class _lowercase(SCREAMING_SNAKE_CASE__, unittest.TestCase):
    """simple docstring"""

    A__ = TvltFeatureExtractor

    def lowerCAmelCase(self : Optional[Any]):
        '''simple docstring'''
        lowerCamelCase__ : List[Any] = TvltFeatureExtractionTester(self)

    def lowerCAmelCase(self : Any):
        '''simple docstring'''
        lowerCamelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(snake_case__, "spectrogram_length"))
        self.assertTrue(hasattr(snake_case__, "feature_size"))
        self.assertTrue(hasattr(snake_case__, "num_audio_channels"))
        self.assertTrue(hasattr(snake_case__, "hop_length"))
        self.assertTrue(hasattr(snake_case__, "chunk_length"))
        self.assertTrue(hasattr(snake_case__, "sampling_rate"))

    def lowerCAmelCase(self : List[Any]):
        '''simple docstring'''
        lowerCamelCase__ : Tuple = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase__ : Tuple = feat_extract_first.save_pretrained(snake_case__)[0]
            check_json_file_has_correct_format(snake_case__)
            lowerCamelCase__ : List[Any] = self.feature_extraction_class.from_pretrained(snake_case__)
        lowerCamelCase__ : int = feat_extract_first.to_dict()
        lowerCamelCase__ : Optional[Any] = feat_extract_second.to_dict()
        lowerCamelCase__ : Optional[int] = dict_first.pop("mel_filters")
        lowerCamelCase__ : List[str] = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(snake_case__, snake_case__))
        self.assertEqual(snake_case__, snake_case__)

    def lowerCAmelCase(self : List[Any]):
        '''simple docstring'''
        lowerCamelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase__ : int = os.path.join(snake_case__, "feat_extract.json")
            feat_extract_first.to_json_file(snake_case__)
            lowerCamelCase__ : List[Any] = self.feature_extraction_class.from_json_file(snake_case__)
        lowerCamelCase__ : int = feat_extract_first.to_dict()
        lowerCamelCase__ : List[Any] = feat_extract_second.to_dict()
        lowerCamelCase__ : Optional[Any] = dict_first.pop("mel_filters")
        lowerCamelCase__ : str = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(snake_case__, snake_case__))
        self.assertEqual(snake_case__, snake_case__)

    def lowerCAmelCase(self : str):
        '''simple docstring'''
        lowerCamelCase__ : Tuple = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        lowerCamelCase__ : List[Any] = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        lowerCamelCase__ : List[Any] = [np.asarray(snake_case__) for speech_input in speech_inputs]
        # Test not batched input
        lowerCamelCase__ : Optional[Any] = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        lowerCamelCase__ : List[Any] = feature_extractor(snake_case__, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        lowerCamelCase__ : Optional[int] = feature_extractor(snake_case__, return_tensors="np", sampling_rate=44100, mask_audio=snake_case__).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        lowerCamelCase__ : Union[str, Any] = [floats_list((1, x))[0] for x in (800, 800, 800)]
        lowerCamelCase__ : Optional[int] = np.asarray(snake_case__)
        lowerCamelCase__ : Any = feature_extractor(snake_case__, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def lowerCAmelCase(self : str, __lowerCamelCase : Optional[int]):
        '''simple docstring'''
        lowerCamelCase__ : Union[str, Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        lowerCamelCase__ : str = ds.sort("id").select(range(snake_case__))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def lowerCAmelCase(self : List[Any]):
        '''simple docstring'''
        lowerCamelCase__ : Any = self._load_datasamples(1)
        lowerCamelCase__ : List[str] = TvltFeatureExtractor()
        lowerCamelCase__ : Optional[int] = feature_extractor(snake_case__, return_tensors="pt").audio_values
        self.assertEquals(audio_values.shape, (1, 1, 192, 128))
        lowerCamelCase__ : str = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], snake_case__, atol=1E-4))
718
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Dict = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _lowercase ( lowercase__): """simple docstring""" A__ = "ibert" def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : Any = type_vocab_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : List[str] = quant_mode lowerCamelCase__ : int = force_dequant class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
0
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check that the card number starts with a valid issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Validate the card number with the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    for i in range(len(cc_number) - 2, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Run all validation checks and report the result."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
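A quick worked check of the Luhn pass above, using standard test numbers (my addition, not part of the source row):

# "4111111111111111": doubling every second digit from the right turns the
# leading 4 into 8 and seven of the 1s into 2s; with the eight untouched 1s
# the running total is 8 + 14 + 8 = 30, and 30 % 10 == 0, so the check passes.
assert luhn_validation("4111111111111111")
assert not luhn_validation("4111111111111112")  # perturbing the last digit breaks it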
719
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Dict = logging.get_logger(__name__) A : Union[str, Any] = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "roberta" def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : List[Any] = vocab_size lowerCamelCase__ : str = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Tuple = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : int = type_vocab_size lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : Dict = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : Any = use_cache lowerCamelCase__ : int = classifier_dropout class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
0
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
720
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _lowercase : """simple docstring""" A__ = field( default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)}) A__ = field( default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) A__ = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) A__ = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}) A__ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"}) class _lowercase ( lowercase__): """simple docstring""" A__ = "train" A__ = "dev" class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = 42 A__ = 42 def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ): '''simple docstring''' lowerCamelCase__ : List[str] = args lowerCamelCase__ : Tuple = is_language_sensitive lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__lowerCamelCase , __lowerCamelCase ): try: lowerCamelCase__ : List[str] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowerCamelCase__ : str = mode # Load data features from cache or dataset file lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1" lowerCamelCase__ : List[str] = os.path.join( cache_dir if 
cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ : List[str] = cached_features_file + ".lock" with FileLock(__lowerCamelCase ): if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: lowerCamelCase__ : str = time.time() lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase__ : Optional[Any] = self.old_features["features"] lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir ) lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features( examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , ) lowerCamelCase__ : int = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.features[i] lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase__ : List[str] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
5
0
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
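Note that the row above performs random transpositions rather than the textbook Fisher-Yates walk, so its output distribution over permutations is not uniform. A minimal sketch of the canonical (Durstenfeld) variant for comparison; the function name is mine:

import random


def fisher_yates_shuffle_canonical(data: list) -> list:
    # Walk from the last index down, swapping each element with a uniformly
    # chosen element at or before it; this yields every permutation with
    # equal probability.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data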
721
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging A : Tuple = logging.get_logger(__name__) A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED A : int = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } A : Union[str, Any] = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCamelCase__ : Any = bs[:] lowerCamelCase__ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 lowerCamelCase__ : Any = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = set() lowerCamelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : Any = char return pairs class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ["input_ids", "attention_mask"] def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase ) lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()} lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding lowerCamelCase__ : List[Any] = bytes_to_unicode() lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1] lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowerCamelCase__ : List[Any] = {} lowerCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase ) lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase ) if not pairs: return token while True: lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : int = 0 while i < len(__lowerCamelCase ): try: lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : List[str] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Dict = tuple(__lowerCamelCase ) lowerCamelCase__ : str = new_word if len(__lowerCamelCase ) == 1: break else: lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase ) lowerCamelCase__ : Dict = word return word def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = [] for token in re.findall(self.pat , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in 
self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Any , __lowerCamelCase : int ): '''simple docstring''' return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' return self.decoder.get(__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase ) lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) lowerCamelCase__ : Tuple = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" 
) lowerCamelCase__ : List[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] lowerCamelCase__ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Any = [self.sep_token_id] lowerCamelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): lowerCamelCase__ : Dict = " " + text return (text, kwargs) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' lowerCamelCase__ : str = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase__ : str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase__ : Optional[int] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
5
0
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A : str = logging.get_logger(__name__) A : Union[str, Any] = { "vocab_file": "vocab.txt", "merges_file": "bpe.codes", } A : Dict = { "vocab_file": { "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt", "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt", }, "merges_file": { "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes", "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes", }, } A : int = { "vinai/phobert-base": 256, "vinai/phobert-large": 256, } def lowercase_ ( _A : str ): lowerCamelCase__ : List[str] = set() lowerCamelCase__ : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : List[Any] = char lowerCamelCase__ : str = set(_lowerCAmelCase ) return pairs class _lowercase ( __UpperCAmelCase): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : List[str]="<mask>" , **__lowerCamelCase : int , ): '''simple docstring''' super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , ) lowerCamelCase__ : List[Any] = vocab_file lowerCamelCase__ : List[str] = merges_file lowerCamelCase__ : int = {} lowerCamelCase__ : Optional[int] = 0 lowerCamelCase__ : List[str] = 1 lowerCamelCase__ : Any = 2 lowerCamelCase__ : str = 3 self.add_from_file(_lowerCamelCase ) lowerCamelCase__ : Tuple = {v: k for k, v in self.encoder.items()} with open(_lowerCamelCase , encoding="utf-8" ) as merges_handle: lowerCamelCase__ : Union[str, Any] = merges_handle.read().split("\n" )[:-1] lowerCamelCase__ : Dict = [tuple(merge.split()[:-1] ) for merge in merges] lowerCamelCase__ : List[str] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) lowerCamelCase__ : List[str] = {} def lowerCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ : Any = [self.cls_token_id] lowerCamelCase__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : 
Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Tuple = [self.sep_token_id] lowerCamelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : int , __lowerCamelCase : Any ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase__ : int = tuple(_lowerCamelCase ) lowerCamelCase__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) lowerCamelCase__ : List[Any] = get_pairs(_lowerCamelCase ) if not pairs: return token while True: lowerCamelCase__ : Union[str, Any] = min(_lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : List[str] = 0 while i < len(_lowerCamelCase ): try: lowerCamelCase__ : Tuple = word.index(_lowerCamelCase , _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : str = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Tuple = tuple(_lowerCamelCase ) lowerCamelCase__ : Optional[int] = new_word if len(_lowerCamelCase ) == 1: break else: lowerCamelCase__ : List[Any] = get_pairs(_lowerCamelCase ) lowerCamelCase__ : Dict = "@@ ".join(_lowerCamelCase ) lowerCamelCase__ : Optional[int] = word[:-4] lowerCamelCase__ : Dict = word return word def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str ): '''simple docstring''' lowerCamelCase__ : int = [] lowerCamelCase__ : Optional[Any] = re.findall(R"\S+\n?" 
, _lowerCamelCase ) for token in words: split_tokens.extend(list(self.bpe(_lowerCamelCase ).split(" " ) ) ) return split_tokens def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple ): '''simple docstring''' return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[int] ): '''simple docstring''' return self.decoder.get(_lowerCamelCase , self.unk_token ) def lowerCAmelCase ( self : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = " ".join(_lowerCamelCase ).replace("@@ " , "" ).strip() return out_string def lowerCAmelCase ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : Optional[int] = os.path.join( _lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : List[str] = os.path.join( _lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) if os.path.abspath(self.merges_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.merges_file , _lowerCamelCase ) return out_vocab_file, out_merge_file def lowerCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if isinstance(_lowerCamelCase , _lowerCamelCase ): try: with open(_lowerCamelCase , "r" , encoding="utf-8" ) as fd: self.add_from_file(_lowerCamelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" ) return lowerCamelCase__ : str = f.readlines() for lineTmp in lines: lowerCamelCase__ : Any = lineTmp.strip() lowerCamelCase__ : List[Any] = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected \'<token> <cnt>\'" ) lowerCamelCase__ : Optional[Any] = line[:idx] lowerCamelCase__ : Any = len(self.encoder )
700
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image"] A__ = [ "image_embeds", "negative_image_embeds", "image", ] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.dummy_unet lowerCamelCase__ : Optional[Any] = self.dummy_movq lowerCamelCase__ : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , 
__lowerCamelCase : int=0 ): '''simple docstring''' lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = "cpu" lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : Optional[Any] = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : str = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : Any = "A red cartoon frog, 4k" lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) lowerCamelCase__ : str = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = 
torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Optional[Any] = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) lowerCamelCase__ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
5
0
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Each index of possible_board is a row and the stored value is that queen's column.
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # First we check that no queen in the current board (possible_board) already
        # uses this column, because a repeated value means a vertical collision. Then
        # we apply the two diagonal formulas:
        #
        # 45°:  y - x = b   or   row - col = b
        # 135°: y + x = b   or   row + col = b
        #
        # and verify that their results do not already exist in
        # diagonal_right_collisions and diagonal_left_collisions respectively.
        #
        # If any of these checks is True there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again with the updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
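A small variant that returns the solution count instead of printing, convenient for sanity checks; the helper name is mine:

def n_queens_count(n: int) -> int:
    # Reuse the solver above but collect the boards silently.
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    return len(boards)


assert n_queens_count(1) == 1
assert n_queens_count(4) == 2  # the classic 4-queens puzzle has exactly two solutions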
701
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
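A worked example of the zero-padded comparison above (my addition):

# 25 = 0b11001 and 32 = 0b100000; zero-filled to width 6 they read
# 011001 and 100000, so the XOR string is "111001" (decimal 57).
assert binary_xor(25, 32) == "0b111001"
assert int(binary_xor(25, 32), 2) == 25 ^ 32  # agrees with Python's ^ operator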
5
0
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) A : List[Any] = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def lowercase_ ( _A : Union[str, Any] ): """simple docstring""" lowerCamelCase__ : str = {} state_dict.pop("pixel_mean" , UpperCamelCase__ ) state_dict.pop("pixel_std" , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowerCamelCase__ : str = key.replace(UpperCamelCase__ , UpperCamelCase__ ) if re.match(UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase__ : Union[str, Any] = int(re.match(UpperCamelCase__ , UpperCamelCase__ ).group(2 ) ) if layer_nb == 0: lowerCamelCase__ : int = key.replace("layers.0" , "proj_in" ) elif layer_nb == 1: lowerCamelCase__ : Any = key.replace("layers.1" , "layers.0" ) elif layer_nb == 2: lowerCamelCase__ : Optional[Any] = key.replace("layers.2" , "proj_out" ) lowerCamelCase__ : Optional[int] = value lowerCamelCase__ : List[str] = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def lowercase_ ( _A : Tuple , _A : Any , _A : Optional[int] , _A : str="ybelkada/segment-anything" ): """simple docstring""" lowerCamelCase__ : List[Any] = hf_hub_download(UpperCamelCase__ , F"checkpoints/{model_name}.pth" ) if "sam_vit_b" in model_name: lowerCamelCase__ : Union[str, Any] = SamConfig() elif "sam_vit_l" in model_name: lowerCamelCase__ : str = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) lowerCamelCase__ : str = SamConfig( vision_config=UpperCamelCase__ , ) elif "sam_vit_h" in model_name: lowerCamelCase__ : Optional[int] = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) lowerCamelCase__ : Union[str, Any] = SamConfig( vision_config=UpperCamelCase__ , ) lowerCamelCase__ : int = torch.load(UpperCamelCase__ , map_location="cpu" ) lowerCamelCase__ : Any = replace_keys(UpperCamelCase__ ) lowerCamelCase__ : Tuple = SamImageProcessor() lowerCamelCase__ : Dict = SamProcessor(image_processor=UpperCamelCase__ ) lowerCamelCase__ : Tuple = SamModel(UpperCamelCase__ ) hf_model.load_state_dict(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = hf_model.to("cuda" ) lowerCamelCase__ : 
Union[str, Any] = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" lowerCamelCase__ : List[str] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("RGB" ) lowerCamelCase__ : Dict = [[[400, 650]]] lowerCamelCase__ : Tuple = [[1]] lowerCamelCase__ : Optional[int] = processor(images=np.array(UpperCamelCase__ ) , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): lowerCamelCase__ : List[str] = hf_model(**UpperCamelCase__ ) lowerCamelCase__ : List[str] = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_890_251_159_668 lowerCamelCase__ : int = processor( images=np.array(UpperCamelCase__ ) , input_points=UpperCamelCase__ , input_labels=UpperCamelCase__ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): lowerCamelCase__ : Optional[int] = hf_model(**UpperCamelCase__ ) lowerCamelCase__ : int = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_712_603_092_193_604 lowerCamelCase__ : str = ((75, 275, 1725, 850),) lowerCamelCase__ : Tuple = processor(images=np.array(UpperCamelCase__ ) , input_boxes=UpperCamelCase__ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): lowerCamelCase__ : Dict = hf_model(**UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_686_015_605_926_514 # Test with 2 points and 1 image. lowerCamelCase__ : Union[str, Any] = [[[400, 650], [800, 650]]] lowerCamelCase__ : Union[str, Any] = [[1, 1]] lowerCamelCase__ : List[str] = processor( images=np.array(UpperCamelCase__ ) , input_points=UpperCamelCase__ , input_labels=UpperCamelCase__ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): lowerCamelCase__ : Optional[Any] = hf_model(**UpperCamelCase__ ) lowerCamelCase__ : Any = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_936_047_792_434_692 if __name__ == "__main__": A : List[Any] = argparse.ArgumentParser() A : Optional[int] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) A : Optional[int] = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
702
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
5
0
def to_upper_case(word: str) -> str:
    """Convert a string to ASCII uppercase, one character at a time."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
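Two quick checks against the built-in str.upper (my additions); note the ord arithmetic above only covers ASCII a-z:

assert to_upper_case("hello, World 123!") == "HELLO, WORLD 123!"
assert to_upper_case("wow") == "wow".upper()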
703
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
5
0
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the largest product of 13 consecutive digits in the series n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
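A slightly tidier variant of the same scan, assuming Python 3.8+ for math.prod; the helper name is mine:

import math


def solution_prod(n: str = N, window: int = 13) -> int:
    # Same O(len(n) * window) scan expressed with math.prod; a true O(len(n))
    # sliding window is awkward here because zero digits kill the running product.
    return max(
        math.prod(int(digit) for digit in n[i : i + window])
        for i in range(len(n) - window + 1)
    )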
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
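# Usage sketch: the string below is "the quick brown fox jumps over the lazy
# dog" Caesar-shifted by 3. On short inputs the chi-squared statistic can be
# noisy, so the result is printed rather than asserted.
shift, chi2, decoded = decrypt_caesar_with_chi_squared(
    "wkh txlfn eurzq ira mxpsv ryhu wkh odcb grj"
)
print(shift, round(chi2, 3), decoded)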
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Parameters
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
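# Note on the annotation format (an assumption based on the parsing above:
# YOLO-style "class x_center y_center width height", all coordinates
# normalised to [0, 1]). Mirroring the image horizontally therefore maps
# x_center -> 1 - x_center and leaves y_center, width and height unchanged.
example_box = [0, 0.25, 0.40, 0.10, 0.20]  # class 0, centred at (0.25, 0.40)
flipped_box = [example_box[0], 1 - example_box[1], *example_box[2:]]
assert flipped_box == [0, 0.75, 0.40, 0.10, 0.20]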
def is_automorphic_number(number: int) -> bool:
    """Return True if ``number``'s square ends in ``number`` itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
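# Quick sanity checks: 5, 6, 25 and 76 are automorphic (their squares 25, 36,
# 625 and 5776 end in the number itself), while 7 is not (49).
assert is_automorphic_number(5) and is_automorphic_number(76)
assert not is_automorphic_number(7)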
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _lowercase ( lowercase__): """simple docstring""" A__ = ["image_processor", "tokenizer"] A__ = "OwlViTImageProcessor" A__ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self : str , __lowerCamelCase : Any=None , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __UpperCamelCase , ) lowerCamelCase__ : Optional[Any] = kwargs.pop("feature_extractor" ) lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__UpperCamelCase , __UpperCamelCase ) def __call__( self : Tuple , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]="max_length" , __lowerCamelCase : List[Any]="np" , **__lowerCamelCase : Union[str, Any] ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(__UpperCamelCase , __UpperCamelCase ) or (isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(text[0] , __UpperCamelCase )): lowerCamelCase__ : Optional[Any] = [self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )] elif isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(text[0] , __UpperCamelCase ): lowerCamelCase__ : Union[str, Any] = [] # Maximum number of queries across batch lowerCamelCase__ : Optional[int] = max([len(__UpperCamelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__UpperCamelCase ) != max_num_queries: lowerCamelCase__ : int = t + [" "] * (max_num_queries - len(__UpperCamelCase )) lowerCamelCase__ : Union[str, Any] = self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ) encodings.append(__UpperCamelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": lowerCamelCase__ : Optional[int] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowerCamelCase__ : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowerCamelCase__ : Union[str, Any] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowerCamelCase__ : List[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowerCamelCase__ : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) lowerCamelCase__ : List[str] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and 
is_tf_available(): import tensorflow as tf lowerCamelCase__ : List[Any] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowerCamelCase__ : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) lowerCamelCase__ : List[str] = BatchEncoding() lowerCamelCase__ : Dict = input_ids lowerCamelCase__ : Optional[Any] = attention_mask if query_images is not None: lowerCamelCase__ : Union[str, Any] = BatchEncoding() lowerCamelCase__ : Dict = self.image_processor( __UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ).pixel_values lowerCamelCase__ : Union[str, Any] = query_pixel_values if images is not None: lowerCamelCase__ : Optional[int] = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ) if text is not None and images is not None: lowerCamelCase__ : Optional[int] = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowerCamelCase__ : Union[str, Any] = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase ) def lowerCAmelCase ( self : List[str] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.image_processor.post_process(*__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase ( self : List[str] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ): '''simple docstring''' return self.image_processor.post_process_object_detection(*__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase ( self : Optional[int] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Dict ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase ( self : str , *__lowerCamelCase : int , **__lowerCamelCase : Optional[int] ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase ( self : Optional[int] , *__lowerCamelCase : int , **__lowerCamelCase : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase ) @property def lowerCAmelCase ( self : Any ): '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCamelCase , ) return self.image_processor_class @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCamelCase , ) return self.image_processor
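# Hedged usage sketch for the processor above (the checkpoint id is the
# public OWL-ViT one; the image URL is illustrative): per-image query lists
# are padded to the maximum number of text queries in the batch.
import requests
from PIL import Image

from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
)
print(inputs.input_ids.shape, inputs.pixel_values.shape)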
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
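# Why the indirection above matters (a minimal sketch of the idea, not the
# actual transformers ``_LazyModule`` implementation): a lazy module resolves
# exported names to real imports only on first attribute access, so importing
# the package stays fast even when heavy backends like torch are installed.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Resolve exported names to their defining submodules on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        return getattr(submodule, attr)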
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING A : str = logging.get_logger(__name__) @add_end_docstrings(A_) class _lowercase ( A_): """simple docstring""" def __init__( self : Tuple , **__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) requires_backends(self , "vision" ) requires_backends(self , "torch" ) if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) self.check_model_type(__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = {} lowerCamelCase__ : Tuple = {} lowerCamelCase__ : Dict = {} # preprocess args if "points_per_batch" in kwargs: lowerCamelCase__ : str = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: lowerCamelCase__ : int = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: lowerCamelCase__ : Optional[Any] = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: lowerCamelCase__ : Dict = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: lowerCamelCase__ : Dict = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: lowerCamelCase__ : List[Any] = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: lowerCamelCase__ : Optional[int] = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: lowerCamelCase__ : Optional[int] = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: lowerCamelCase__ : List[str] = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: lowerCamelCase__ : List[Any] = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: lowerCamelCase__ : int = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: lowerCamelCase__ : Tuple = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : List[Any] , __lowerCamelCase : Any , *__lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[int] ): '''simple docstring''' return super().__call__(__lowerCamelCase , *__lowerCamelCase , num_workers=__lowerCamelCase , batch_size=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any]=64 , __lowerCamelCase : Any = 0 , __lowerCamelCase : List[str] = 512 / 1500 , __lowerCamelCase : List[str] = 32 , __lowerCamelCase : Dict = 1 , ): '''simple docstring''' lowerCamelCase__ : str = load_image(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = self.image_processor.size["""longest_edge"""] lowerCamelCase__ : List[str] = self.image_processor.generate_crop_boxes( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = self.image_processor(images=__lowerCamelCase , return_tensors="pt" ) with self.device_placement(): if self.framework == "pt": lowerCamelCase__ : str = self.get_inference_context() with inference_context(): lowerCamelCase__ : int = self._ensure_tensor_on_device(__lowerCamelCase , device=self.device ) lowerCamelCase__ 
: List[str] = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) ) lowerCamelCase__ : int = image_embeddings lowerCamelCase__ : str = grid_points.shape[1] lowerCamelCase__ : Optional[int] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None" ) for i in range(0 , __lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Any = grid_points[:, i : i + points_per_batch, :, :] lowerCamelCase__ : List[str] = input_labels[:, i : i + points_per_batch] lowerCamelCase__ : Any = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def lowerCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=0.8_8 , __lowerCamelCase : Tuple=0.9_5 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Any=1 , ): '''simple docstring''' lowerCamelCase__ : Tuple = model_inputs.pop("input_boxes" ) lowerCamelCase__ : List[str] = model_inputs.pop("is_last" ) lowerCamelCase__ : Union[str, Any] = model_inputs.pop("original_sizes" ).tolist() lowerCamelCase__ : List[str] = model_inputs.pop("reshaped_input_sizes" ).tolist() lowerCamelCase__ : Any = self.model(**__lowerCamelCase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCamelCase__ : Optional[int] = model_outputs["""pred_masks"""] lowerCamelCase__ : int = self.image_processor.post_process_masks( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , binarize=__lowerCamelCase ) lowerCamelCase__ : Dict = model_outputs["""iou_scores"""] lowerCamelCase__ : List[str] = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : List[Any]=0.7 , ): '''simple docstring''' lowerCamelCase__ : int = [] lowerCamelCase__ : Optional[int] = [] lowerCamelCase__ : List[Any] = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores" ) ) all_masks.extend(model_output.pop("masks" ) ) all_boxes.append(model_output.pop("boxes" ) ) lowerCamelCase__ : Union[str, Any] = torch.cat(__lowerCamelCase ) lowerCamelCase__ : Any = torch.cat(__lowerCamelCase ) lowerCamelCase__ : Dict = self.image_processor.post_process_for_mask_generation( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[int] = defaultdict(__lowerCamelCase ) for output in model_outputs: for k, v in output.items(): extra[k].append(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {} if output_rle_mask: lowerCamelCase__ : Dict = rle_mask if output_bboxes_mask: lowerCamelCase__ : str = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
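# Hedged usage sketch for the chunk pipeline above (the checkpoint id is the
# public SAM one): the "mask-generation" task batches the point grid through
# the model and aggregates masks and IoU scores across chunks.
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(len(outputs["masks"]), outputs["scores"][:3])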
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
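# Usage sketch with the test tables above: any truthy keyword argument (e.g.
# describe=True) also prints the allocation tables before the simulation.
BankersAlgorithm(
    test_claim_vector, test_allocated_res_table, test_maximum_claim_table
).main(describe=True)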
from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Use Pollard's rho algorithm to return a nontrivial factor of ``num``."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
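# Quick check (a sketch): 4171 = 43 * 97, so any returned factor should be
# one of the two primes. With the default deterministic seed this typically
# succeeds within the allotted attempts, but success is not guaranteed.
factor = pollard_rho(4171)
print(factor, None if factor is None else 4171 // factor)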
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = BarthezTokenizer A__ = BarthezTokenizerFast A__ = True A__ = True def lowerCAmelCase ( self : int ): '''simple docstring''' super().setUp() lowerCamelCase__ : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Any = "<pad>" lowerCamelCase__ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 101122 ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase__ : str = [0, 57, 3018, 70307, 91, 2] lowerCamelCase__ : Tuple = self.tokenizer( __lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCamelCase__ : Any = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Tuple = self.get_rust_tokenizer() lowerCamelCase__ : Union[str, Any] = "I was born in 92000, and this is falsé." 
lowerCamelCase__ : Dict = tokenizer.tokenize(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = self.get_rust_tokenizer() lowerCamelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCamelCase__ : List[str] = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
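# Hedged usage sketch for the tokenizer under test (the checkpoint id and the
# sample sentence are taken from the test above): the fast and slow
# tokenizers should produce identical tokens.
from transformers import BarthezTokenizerFast

tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
print(tok.tokenize("I was born in 92000, and this is falsé."))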
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
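# Example invocation (paths are illustrative): the checkpoint's parent
# directory name, "aeslc" here, selects the task-specific config entry
# ``summarization_aeslc``.
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc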
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, expected in {0.04, 0.06}
        window_size : size of the neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the validated constant instead of a hard-coded 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
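# Background note (the standard Harris formulation, not specific to this
# file): for the structure tensor M = [[Sxx, Sxy], [Sxy, Syy]] summed over
# the window, the response is R = det(M) - k * trace(M)^2; R >> 0 marks a
# corner, R << 0 an edge, and small |R| a flat region.
import numpy as np

M = np.array([[4.0, 1.0], [1.0, 3.0]])  # synthetic structure tensor
k = 0.04
R = np.linalg.det(M) - k * np.trace(M) ** 2
print(R)  # strongly positive -> corner-like neighbourhood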
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ): '''simple docstring''' lowerCamelCase__ : Dict = parent lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Any = seq_length lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : int = use_input_mask lowerCamelCase__ : List[str] = use_token_type_ids lowerCamelCase__ : int = use_labels lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : List[Any] = embedding_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_hidden_groups lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase__ : Optional[int] = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : Optional[Any] = type_sequence_label_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : str = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Any = scope def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[int] = None if self.use_input_mask: lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : List[str] = None lowerCamelCase__ : int = None if self.use_labels: lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : str ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : str = model( __lowerCamelCase , 
attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.num_labels lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.num_choices lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : int = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : int = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Union[str, Any] = config_and_inputs lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A__ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, 
"question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A__ = True def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase ) lowerCamelCase__ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = AlbertModelTester(self ) lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ : Dict = type self.model_tester.create_and_check_model(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" ) lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(__lowerCamelCase , 
attention_mask=__lowerCamelCase )[0] lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) lowerCamelCase__ : Dict = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # every composite below p * p already has a smaller prime factor,
            # so marking can safely start at p * p and step by p
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum through the matrix stored in `filename`,
    moving only right, up, or down, from any cell in the left column to any
    cell in the right column."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # start each column from the sums of the column to the left (move right)
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # then relax downwards and upwards within the current column
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f'{solution() = }')
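# Worked example (sketch, not taken from the real data file): for matrix [[1, 2], [3, 4]],
# the column-0 pass gives sums [1, 3]; the rightward step for column 1 gives [3, 7], and
# the up/down relaxations leave those values unchanged, so the result is min(3, 7) == 3
# (the path 1 -> 2 along the top row).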
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the Exponential Linear Unit (ELU) elementwise:
    x for x > 0, otherwise alpha * (exp(x) - 1)."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
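# Usage sketch: positive inputs pass through unchanged, negative inputs saturate
# towards -alpha.
#
#     exponential_linear_unit(np.array([-1.0, 0.0, 2.0]), alpha=1.0)
#     # -> approximately array([-0.6321, 0.0, 2.0])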
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _lowercase ( datasets.Metric): """simple docstring""" def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : str = compute_bleu( reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : List[str] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
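# Usage sketch (mirrors the docstring example above; assumes this metric script is
# what `datasets.load_metric("bleu")` resolves to):
#
#     bleu = datasets.load_metric("bleu")
#     results = bleu.compute(
#         predictions=[["hello", "there", "general", "kenobi"]],
#         references=[[["hello", "there", "general", "kenobi"]]],
#     )
#     assert results["bleu"] == 1.0  # identical prediction and reference -> perfect score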
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase_ ( _A : Optional[Any] , _A : Any=10 ): """simple docstring""" lowerCamelCase__ : Dict = [] for _ in range(UpperCAmelCase__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase_ ( _A : Tuple , _A : Tuple=10 ): """simple docstring""" lowerCamelCase__ : List[Any] = [] for step in range(UpperCAmelCase__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ : Optional[Any] = os.path.join(UpperCAmelCase__ , "schedule.bin" ) torch.save(scheduler.state_dict() , UpperCAmelCase__ ) lowerCamelCase__ : str = torch.load(UpperCAmelCase__ ) scheduler.load_state_dict(UpperCAmelCase__ ) return lrs @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : int ): '''simple docstring''' self.assertEqual(len(_A ) , len(_A ) ) for a, b in zip(_A , _A ): self.assertAlmostEqual(_A , _A , delta=_A ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_A ) lowerCamelCase__ : Dict = torch.tensor([0.4, 0.2, -0.5] ) lowerCamelCase__ : Tuple = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCamelCase__ : List[str] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): lowerCamelCase__ : Union[str, Any] = criterion(_A , _A ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[int] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_A ) lowerCamelCase__ : List[Any] = torch.tensor([0.4, 0.2, -0.5] ) lowerCamelCase__ : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCamelCase__ : Any = Adafactor( params=[w] , lr=1E-2 , eps=(1E-3_0, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_A , weight_decay=0.0 , relative_step=_A , scale_parameter=_A , warmup_init=_A , ) for _ in range(1000 ): lowerCamelCase__ : Union[str, Any] = criterion(_A , _A ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" A__ = nn.Linear(50 , 50) if is_torch_available() else None A__ = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None A__ = 10 def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any]=None ): '''simple docstring''' self.assertEqual(len(_A ) , len(_A ) ) for a, b in zip(_A , _A ): self.assertAlmostEqual(_A , _A , delta=_A , msg=_A ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) lowerCamelCase__ : int = { get_constant_schedule: ({}, [1_0.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4], ), } for scheduler_func, data in scheds.items(): lowerCamelCase__ , lowerCamelCase__ : Tuple = data lowerCamelCase__ : Union[str, Any] = scheduler_func(self.optimizer , **_A ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowerCamelCase__ : Optional[int] = unwrap_schedule(_A , self.num_steps ) self.assertListAlmostEqual( _A , _A , tol=1E-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) lowerCamelCase__ : str = scheduler_func(self.optimizer , **_A ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_A ) # wrap to test picklability of the schedule lowerCamelCase__ : List[Any] = unwrap_and_save_reload_schedule(_A , self.num_steps ) self.assertListEqual(_A , _A , msg=f"failed for {scheduler_func} in save and reload" ) class _lowercase : """simple docstring""" def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = fn def __call__( self : Tuple , *__lowerCamelCase : str , **__lowerCamelCase : Dict ): '''simple docstring''' return self.fn(*_A , **_A ) @classmethod def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = list(map(self , scheduler.lr_lambdas ) )
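# A minimal sketch of what the final classmethod is presumably meant to do (wrap each
# lr_lambda callable so the LambdaLR schedule stays picklable across save/reload):
#
#     @classmethod
#     def wrap_scheduler(cls, scheduler):
#         scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))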
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]  # CSS class Google uses for result links
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'https://google.com{link.get("href")}')
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Dict = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" ) lowerCamelCase__ : str = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" lowerCamelCase__ : Any = model(__lowerCamelCase )["last_hidden_state"] lowerCamelCase__ : List[str] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice. lowerCamelCase__ : str = tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _lowercase : """simple docstring""" def __init__( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : int=13 , __lowerCamelCase : Union[str, Any]=30 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Any=32 , __lowerCamelCase : Dict=2 , __lowerCamelCase : str=4 , __lowerCamelCase : Tuple=37 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Union[str, Any]=10 , __lowerCamelCase : Tuple=0.0_2 , __lowerCamelCase : Any=3 , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Any]=2 , ): '''simple docstring''' lowerCamelCase__ : List[Any] = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : Any = image_size lowerCamelCase__ : List[str] = patch_size lowerCamelCase__ : Tuple = num_channels lowerCamelCase__ : Optional[Any] = is_training lowerCamelCase__ : List[Any] = use_labels lowerCamelCase__ : Optional[int] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : Any = num_attention_heads lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob lowerCamelCase__ : Dict = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : int = scope lowerCamelCase__ : List[str] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCamelCase__ : List[Any] = (image_size // patch_size) ** 2 lowerCamelCase__ : Tuple = num_patches + 2 def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Any = None if self.use_labels: lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Optional[Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : Any ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , 
initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = TFDeiTModel(config=__lowerCamelCase ) lowerCamelCase__ : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : str ): '''simple docstring''' lowerCamelCase__ : List[Any] = TFDeiTForMaskedImageModeling(config=__lowerCamelCase ) lowerCamelCase__ : List[str] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase__ : Optional[Any] = 1 lowerCamelCase__ : Union[str, Any] = TFDeiTForMaskedImageModeling(__lowerCamelCase ) lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Any = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Tuple = self.type_sequence_label_size lowerCamelCase__ : int = TFDeiTForImageClassification(__lowerCamelCase ) lowerCamelCase__ : int = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__ : Optional[Any] = 1 lowerCamelCase__ : Tuple = TFDeiTForImageClassification(__lowerCamelCase ) lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs lowerCamelCase__ : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) A__ = ( { """feature-extraction""": TFDeiTModel, """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) A__ = False A__ = False A__ = False A__ = False def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = TFDeiTModelTester(self ) lowerCamelCase__ : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' pass def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Tuple = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , tf.keras.layers.Dense ) ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] = model_class(__lowerCamelCase ) lowerCamelCase__ : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : int = [*signature.parameters.keys()] lowerCamelCase__ : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def lowerCAmelCase ( self : str , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=False ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : int = TFDeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class _lowercase ( unittest.TestCase): """simple docstring""" @cached_property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ) lowerCamelCase__ : Optional[int] = self.default_image_processor lowerCamelCase__ : int = prepare_img() lowerCamelCase__ : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass lowerCamelCase__ : List[str] = model(**__lowerCamelCase ) # verify the logits lowerCamelCase__ : List[str] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _lowercase ( lowercase__): """simple docstring""" A__ = "blenderbot-small" A__ = ["past_key_values"] A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' lowerCamelCase__ : str = vocab_size lowerCamelCase__ : Union[str, Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Optional[int] = encoder_ffn_dim lowerCamelCase__ : Dict = encoder_layers lowerCamelCase__ : Any = encoder_attention_heads lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim lowerCamelCase__ : str = decoder_layers lowerCamelCase__ : Optional[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : Optional[Any] = activation_function lowerCamelCase__ : Dict = init_std lowerCamelCase__ : List[str] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : int = use_cache lowerCamelCase__ : List[Any] = encoder_layers lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ : Union[str, Any] = {0: "batch"} lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"} if self.use_past: 
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Union[str, Any] = super().outputs else: lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1] lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads lowerCamelCase__ : str = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[int] = decoder_seq_length + 3 lowerCamelCase__ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : List[Any] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase__ : str = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype lowerCamelCase__ : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Tuple = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) lowerCamelCase__ : Dict = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() lowerCamelCase__ : List[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowerCamelCase__ : Optional[int] = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } lowerCamelCase__ : List[Any] = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16000, "return_attention_mask": False, "do_normalize": True, } lowerCamelCase__ : Tuple = tempfile.mkdtemp() lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : List[str] = os.path.join(self.tmpdirname , __lowerCamelCase ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) # load decoder from hub lowerCamelCase__ : Dict = "hf-internal-testing/ngram-beam-search-decoder" def lowerCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = self.add_kwargs_tokens_map.copy() kwargs.update(__lowerCamelCase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def lowerCAmelCase ( self : int , **__lowerCamelCase : str ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def lowerCAmelCase ( self : int , **__lowerCamelCase : Dict ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Tuple = self.get_tokenizer() lowerCamelCase__ : str = self.get_feature_extractor() lowerCamelCase__ : Any = self.get_decoder() lowerCamelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , 
__lowerCamelCase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __lowerCamelCase ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , __lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match lowerCamelCase__ : str = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : List[str] = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["xx"] ) with self.assertRaisesRegex(__lowerCamelCase , "include" ): WavaVecaProcessorWithLM( tokenizer=__lowerCamelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.get_feature_extractor() lowerCamelCase__ : Dict = self.get_tokenizer() lowerCamelCase__ : Union[str, Any] = self.get_decoder() lowerCamelCase__ : int = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = floats_list((3, 1000) ) lowerCamelCase__ : Tuple = feature_extractor(__lowerCamelCase , return_tensors="np" ) lowerCamelCase__ : List[str] = processor(__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.get_feature_extractor() lowerCamelCase__ : Tuple = self.get_tokenizer() lowerCamelCase__ : Union[str, Any] = self.get_decoder() lowerCamelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) lowerCamelCase__ : Any = "This is a test string" lowerCamelCase__ : Union[str, Any] = processor(text=__lowerCamelCase ) lowerCamelCase__ : List[str] = tokenizer(__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Any]=(2, 10, 16) , __lowerCamelCase : Any=77 ): '''simple docstring''' np.random.seed(__lowerCamelCase ) return np.random.rand(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.get_feature_extractor() lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : List[str] = self.get_decoder() lowerCamelCase__ : int = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , 
decoder=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 ) lowerCamelCase__ : Optional[Any] = processor.decode(__lowerCamelCase ) lowerCamelCase__ : Tuple = decoder.decode_beams(__lowerCamelCase )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual("</s> <s> </s>" , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ["fork"], ["spawn"]] ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str ): '''simple docstring''' lowerCamelCase__ : Tuple = self.get_feature_extractor() lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Dict = self.get_decoder() lowerCamelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) lowerCamelCase__ : Tuple = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: lowerCamelCase__ : Dict = processor.batch_decode(__lowerCamelCase ) else: with get_context(__lowerCamelCase ).Pool() as pool: lowerCamelCase__ : str = processor.batch_decode(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[int] = list(__lowerCamelCase ) with get_context("fork" ).Pool() as p: lowerCamelCase__ : Any = decoder.decode_beams_batch(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(__lowerCamelCase , decoded_processor.text ) self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text ) self.assertListEqual(__lowerCamelCase , decoded_processor.logit_score ) self.assertListEqual(__lowerCamelCase , decoded_processor.lm_score ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Tuple = self.get_feature_extractor() lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Any = self.get_decoder() lowerCamelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) lowerCamelCase__ : Dict = self._get_dummy_logits() lowerCamelCase__ : List[Any] = 15 lowerCamelCase__ : List[str] = -2_0.0 lowerCamelCase__ : Optional[Any] = -4.0 lowerCamelCase__ : str = processor.batch_decode( __lowerCamelCase , beam_width=__lowerCamelCase , beam_prune_logp=__lowerCamelCase , token_min_logp=__lowerCamelCase , ) lowerCamelCase__ : Dict = decoded_processor_out.text lowerCamelCase__ : Optional[Any] = list(__lowerCamelCase ) with get_context("fork" ).Pool() as pool: lowerCamelCase__ : List[Any] = decoder.decode_beams_batch( __lowerCamelCase , __lowerCamelCase , beam_width=__lowerCamelCase , beam_prune_logp=__lowerCamelCase , token_min_logp=__lowerCamelCase , ) lowerCamelCase__ : str = [d[0][0] for d in decoded_decoder_out] lowerCamelCase__ : Tuple = [d[0][2] for d in decoded_decoder_out] lowerCamelCase__ : List[str] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __lowerCamelCase ) 
self.assertTrue(np.array_equal(__lowerCamelCase , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , __lowerCamelCase , atol=1E-3 ) ) self.assertTrue(np.array_equal(__lowerCamelCase , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , __lowerCamelCase , atol=1E-3 ) ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Any = self.get_feature_extractor() lowerCamelCase__ : Union[str, Any] = self.get_tokenizer() lowerCamelCase__ : Optional[int] = self.get_decoder() lowerCamelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) lowerCamelCase__ : Any = self._get_dummy_logits() lowerCamelCase__ : List[str] = 2.0 lowerCamelCase__ : Any = 5.0 lowerCamelCase__ : List[Any] = -2_0.0 lowerCamelCase__ : Tuple = True lowerCamelCase__ : Tuple = processor.batch_decode( __lowerCamelCase , alpha=__lowerCamelCase , beta=__lowerCamelCase , unk_score_offset=__lowerCamelCase , lm_score_boundary=__lowerCamelCase , ) lowerCamelCase__ : Optional[Any] = decoded_processor_out.text lowerCamelCase__ : Dict = list(__lowerCamelCase ) decoder.reset_params( alpha=__lowerCamelCase , beta=__lowerCamelCase , unk_score_offset=__lowerCamelCase , lm_score_boundary=__lowerCamelCase , ) with get_context("fork" ).Pool() as pool: lowerCamelCase__ : str = decoder.decode_beams_batch( __lowerCamelCase , __lowerCamelCase , ) lowerCamelCase__ : Tuple = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __lowerCamelCase ) lowerCamelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -2_0.0 ) self.assertEqual(lm_model.score_boundary , __lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) lowerCamelCase__ : List[str] = processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase__ : int = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() lowerCamelCase__ : int = os.listdir(__lowerCamelCase ) lowerCamelCase__ : List[Any] = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Tuple = snapshot_download("hf-internal-testing/processor_with_lm" ) lowerCamelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : Any = processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase__ : List[Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() lowerCamelCase__ : Union[str, Any] = os.listdir(__lowerCamelCase ) lowerCamelCase__ : List[str] = os.listdir(__lowerCamelCase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) lowerCamelCase__ : Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" ) lowerCamelCase__ : Dict = floats_list((3, 1000) ) lowerCamelCase__ : Union[str, Any] = processor_wavaveca(__lowerCamelCase , return_tensors="np" ) lowerCamelCase__ : Union[str, Any] = processor_auto(__lowerCamelCase , return_tensors="np" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) lowerCamelCase__ : List[str] = self._get_dummy_logits() lowerCamelCase__ : str = processor_wavaveca.batch_decode(__lowerCamelCase ) lowerCamelCase__ : List[Any] = processor_auto.batch_decode(__lowerCamelCase ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : str = self.get_feature_extractor() lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : int = self.get_decoder() lowerCamelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , ) @staticmethod def lowerCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ): '''simple docstring''' lowerCamelCase__ : str = [d[key] for d in offsets] return retrieved_list def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) lowerCamelCase__ : List[str] = self._get_dummy_logits()[0] lowerCamelCase__ : List[str] = processor.decode(__lowerCamelCase , output_word_offsets=__lowerCamelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = 
WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) lowerCamelCase__ : Any = self._get_dummy_logits() lowerCamelCase__ : Union[str, Any] = processor.batch_decode(__lowerCamelCase , output_word_offsets=__lowerCamelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) ) self.assertListEqual( [" ".join(self.get_from_offsets(__lowerCamelCase , "word" ) ) for o in outputs["word_offsets"]] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' import torch lowerCamelCase__ : str = load_dataset("common_voice" , "en" , split="train" , streaming=__lowerCamelCase ) lowerCamelCase__ : str = ds.cast_column("audio" , datasets.Audio(sampling_rate=16000 ) ) lowerCamelCase__ : Dict = iter(__lowerCamelCase ) lowerCamelCase__ : int = next(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) lowerCamelCase__ : str = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train lowerCamelCase__ : Dict = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values with torch.no_grad(): lowerCamelCase__ : Dict = model(__lowerCamelCase ).logits.cpu().numpy() lowerCamelCase__ : Optional[int] = processor.decode(logits[0] , output_word_offsets=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate lowerCamelCase__ : List[Any] = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] lowerCamelCase__ : Tuple = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" # output words self.assertEqual(" ".join(self.get_from_offsets(__lowerCamelCase , "word" ) ) , __lowerCamelCase ) self.assertEqual(" ".join(self.get_from_offsets(__lowerCamelCase , "word" ) ) , output.text ) # output times lowerCamelCase__ : Tuple = torch.tensor(self.get_from_offsets(__lowerCamelCase , "start_time" ) ) lowerCamelCase__ : List[str] = torch.tensor(self.get_from_offsets(__lowerCamelCase , "end_time" ) ) # fmt: off lowerCamelCase__ : Any = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) lowerCamelCase__ : Optional[int] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=0.0_1 ) ) self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=0.0_1 ) )
716
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
5
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy loader so the torch-backed submodule
    # is only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
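# Hypothetical usage sketch (not part of the original file): what the
# _import_structure / _LazyModule pattern above buys is that importing the package
# stays cheap, while heavy dependencies load only when first touched.
#
#   from transformers import VanConfig   # no torch import triggered yet
#   from transformers import VanModel    # resolving this symbol pulls in torch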
717
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Any = use_token_type_ids lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : List[Any] = self.vocab_size - 1 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Any = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, 
sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.num_labels lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any = config_and_inputs lowerCamelCase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ = ( { "feature-extraction": OpenAIGPTModel, 
"text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): '''simple docstring''' lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Tuple = inputs_dict["labels"] lowerCamelCase__ : Any = inputs_dict["labels"] lowerCamelCase__ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCamelCase ) lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is lowerCamelCase__ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 
239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
5
0
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as 'mm-dd-yyyy' or 'mm/dd/yyyy'."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math: Zeller's congruence treats January/February as months 13/14
    # of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math against Python's own calendar
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
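# Minimal usage sketch for the reconstruction above (hypothetical, not in the
# original file). zeller() already cross-checks its own result against
# datetime.date.weekday() and raises AssertionError on any mismatch, so calling it
# on a few known dates doubles as a correctness probe.
for probe_date in ["01-31-2010", "02-01-2010", "12-25-2023"]:
    print(zeller(probe_date))  # e.g. "Your date 01-31-2010, is a Sunday!"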
718
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
5
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _lowercase ( unittest.TestCase): """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Union[str, Any]=18 , __lowerCamelCase : Optional[int]=30 , __lowerCamelCase : Dict=400 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , ): '''simple docstring''' lowerCamelCase__ : List[str] = size if size is not None else {"shortest_edge": 18} lowerCamelCase__ : Union[str, Any] = crop_size if crop_size is not None else {"height": 18, "width": 18} lowerCamelCase__ : int = parent lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Union[str, Any] = image_size lowerCamelCase__ : Optional[int] = min_resolution lowerCamelCase__ : List[str] = max_resolution lowerCamelCase__ : str = do_resize lowerCamelCase__ : Tuple = size lowerCamelCase__ : Dict = do_center_crop lowerCamelCase__ : Tuple = crop_size lowerCamelCase__ : Any = do_normalize lowerCamelCase__ : str = image_mean lowerCamelCase__ : Union[str, Any] = image_std def lowerCAmelCase ( self : str ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _lowercase ( lowercase_ , unittest.TestCase): """simple docstring""" A__ = LevitImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : List[Any] = LevitImageProcessingTester(self ) @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_center_crop" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) lowerCamelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def lowerCAmelCase ( self : 
int ): '''simple docstring''' pass def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input lowerCamelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCamelCase__ : Dict = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input lowerCamelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCamelCase__ : Union[str, Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input lowerCamelCase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCamelCase__ : Dict = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
719
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
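# Minimal usage sketch (hypothetical, not part of the original file): the config is a
# plain, serializable container of hyperparameters; overrides are keyword arguments
# and anything unknown flows through to PretrainedConfig.
config = RobertaConfig(num_hidden_layers=6)
assert config.num_hidden_layers == 6
assert config.model_type == "roberta"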
5
0
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """Return the length of a circular arc spanning `angle` degrees on a circle of the given radius."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
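# Quick sanity check for the reconstruction above (hypothetical usage, not part of the
# original file): a 360-degree arc should equal the full circumference 2*pi*r, and a
# 90-degree arc a quarter of it.
from math import isclose

assert isclose(arc_length(360, 10), 2 * pi * 10)
assert isclose(arc_length(90, 10), pi * 5)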
720
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _lowercase : """simple docstring""" A__ = field( default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)}) A__ = field( default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) A__ = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) A__ = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}) A__ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"}) class _lowercase ( lowercase__): """simple docstring""" A__ = "train" A__ = "dev" class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = 42 A__ = 42 def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ): '''simple docstring''' lowerCamelCase__ : List[str] = args lowerCamelCase__ : Tuple = is_language_sensitive lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__lowerCamelCase , __lowerCamelCase ): try: lowerCamelCase__ : List[str] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowerCamelCase__ : str = mode # Load data features from cache or dataset file lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1" lowerCamelCase__ : List[str] = os.path.join( cache_dir if 
cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ : List[str] = cached_features_file + ".lock" with FileLock(__lowerCamelCase ): if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: lowerCamelCase__ : str = time.time() lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase__ : Optional[Any] = self.old_features["features"] lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir ) lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features( examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , ) lowerCamelCase__ : int = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.features[i] lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase__ : List[str] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
5
0
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def lowercase_ ( _A : Union[str, Any] , _A : Any=None ): """simple docstring""" lowerCamelCase__ : Optional[Any] = None if token is not None: lowerCamelCase__ : Dict = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} lowerCamelCase__ : List[str] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" lowerCamelCase__ : Tuple = requests.get(_A , headers=_A ).json() lowerCamelCase__ : Any = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) lowerCamelCase__ : Any = math.ceil((result["total_count"] - 100) / 100 ) for i in range(_A ): lowerCamelCase__ : Union[str, Any] = requests.get(url + F"&page={i + 2}" , headers=_A ).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return job_links except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def lowercase_ ( _A : int , _A : int=None ): """simple docstring""" lowerCamelCase__ : Dict = None if token is not None: lowerCamelCase__ : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} lowerCamelCase__ : Union[str, Any] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100" lowerCamelCase__ : Any = requests.get(_A , headers=_A ).json() lowerCamelCase__ : Tuple = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) lowerCamelCase__ : Tuple = math.ceil((result["total_count"] - 100) / 100 ) for i in range(_A ): lowerCamelCase__ : List[str] = requests.get(url + F"&page={i + 2}" , headers=_A ).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) return artifacts except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def lowercase_ ( _A : Any , _A : Union[str, Any] , _A : int , _A : int ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = None if token is not None: lowerCamelCase__ : Optional[int] = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} lowerCamelCase__ : Tuple = requests.get(_A , headers=_A , allow_redirects=_A ) lowerCamelCase__ : Dict = result.headers["Location"] lowerCamelCase__ : Tuple = requests.get(_A , allow_redirects=_A ) lowerCamelCase__ : Tuple = os.path.join(_A , F"{artifact_name}.zip" ) with open(_A , "wb" ) as fp: fp.write(response.content ) def lowercase_ ( _A : str , _A : Union[str, Any]=None ): """simple docstring""" lowerCamelCase__ : Optional[Any] = [] lowerCamelCase__ : Any = [] lowerCamelCase__ : Tuple = None with zipfile.ZipFile(_A ) as z: for filename in z.namelist(): if not os.path.isdir(_A ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(_A ) as f: for line in f: lowerCamelCase__ : Optional[int] = line.decode("UTF-8" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs lowerCamelCase__ : Any = line[: line.index(": " )] lowerCamelCase__ : Union[str, Any] = line[line.index(": " ) + len(": " ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED " ): # `test` is the test method that failed 
lowerCamelCase__ : Union[str, Any] = line[len("FAILED " ) :] failed_tests.append(_A ) elif filename == "job_name.txt": lowerCamelCase__ : Union[str, Any] = line if len(_A ) != len(_A ): raise ValueError( F"`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` " F"and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some" " problem." ) lowerCamelCase__ : List[Any] = None if job_name and job_links: lowerCamelCase__ : List[Any] = job_links.get(_A , _A ) # A list with elements of the form (line of error, error, failed test) lowerCamelCase__ : Optional[Any] = [x + [y] + [job_link] for x, y in zip(_A , _A )] return result def lowercase_ ( _A : str , _A : str=None ): """simple docstring""" lowerCamelCase__ : str = [] lowerCamelCase__ : List[Any] = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(".zip" )] for p in paths: errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) ) return errors def lowercase_ ( _A : Tuple , _A : Optional[Any]=None ): """simple docstring""" lowerCamelCase__ : List[str] = Counter() counter.update([x[1] for x in logs] ) lowerCamelCase__ : Union[str, Any] = counter.most_common() lowerCamelCase__ : int = {} for error, count in counts: if error_filter is None or error not in error_filter: lowerCamelCase__ : Optional[int] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} lowerCamelCase__ : str = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) ) return r def lowercase_ ( _A : List[str] ): """simple docstring""" lowerCamelCase__ : Any = test.split("::" )[0] if test.startswith("tests/models/" ): lowerCamelCase__ : List[Any] = test.split("/" )[2] else: lowerCamelCase__ : int = None return test def lowercase_ ( _A : Optional[int] , _A : Union[str, Any]=None ): """simple docstring""" lowerCamelCase__ : List[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs] lowerCamelCase__ : Optional[Any] = [x for x in logs if x[2] is not None] lowerCamelCase__ : Optional[Any] = {x[2] for x in logs} lowerCamelCase__ : Tuple = {} for test in tests: lowerCamelCase__ : Any = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) lowerCamelCase__ : Optional[Any] = counter.most_common() lowerCamelCase__ : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} lowerCamelCase__ : Any = sum(error_counts.values() ) if n_errors > 0: lowerCamelCase__ : int = {"count": n_errors, "errors": error_counts} lowerCamelCase__ : Dict = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) ) return r def lowercase_ ( _A : List[str] ): """simple docstring""" lowerCamelCase__ : Any = "| no. | error | status |" lowerCamelCase__ : Optional[Any] = "|-:|:-|:-|" lowerCamelCase__ : Union[str, Any] = [header, sep] for error in reduced_by_error: lowerCamelCase__ : Optional[Any] = reduced_by_error[error]["count"] lowerCamelCase__ : Union[str, Any] = F"| {count} | {error[:100]} | |" lines.append(_A ) return "\n".join(_A ) def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : Any = "| model | no. 
of errors | major error | count |" lowerCamelCase__ : Union[str, Any] = "|-:|-:|-:|-:|" lowerCamelCase__ : Union[str, Any] = [header, sep] for model in reduced_by_model: lowerCamelCase__ : int = reduced_by_model[model]["count"] lowerCamelCase__ , lowerCamelCase__ : Dict = list(reduced_by_model[model]["errors"].items() )[0] lowerCamelCase__ : Union[str, Any] = F"| {model} | {count} | {error[:60]} | {_count} |" lines.append(_A ) return "\n".join(_A ) if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") A : Tuple = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) A : Optional[int] = get_job_links(args.workflow_run_id, token=args.token) A : Union[str, Any] = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: A : Optional[Any] = k.find(" / ") A : Any = k[index + len(" / ") :] A : Optional[Any] = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) A : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) A : Any = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error A : Tuple = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors A : str = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) A : str = reduce_by_error(errors) A : Tuple = reduce_by_model(errors) A : Union[str, Any] = make_github_table(reduced_by_error) A : List[str] = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
721
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging A : Tuple = logging.get_logger(__name__) A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED A : int = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } A : Union[str, Any] = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCamelCase__ : Any = bs[:] lowerCamelCase__ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 lowerCamelCase__ : Any = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = set() lowerCamelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : Any = char return pairs class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ["input_ids", "attention_mask"] def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase ) lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()} lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding lowerCamelCase__ : List[Any] = bytes_to_unicode() lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1] lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowerCamelCase__ : List[Any] = {} lowerCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase ) lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase ) if not pairs: return token while True: lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : int = 0 while i < len(__lowerCamelCase ): try: lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : List[str] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Dict = tuple(__lowerCamelCase ) lowerCamelCase__ : str = new_word if len(__lowerCamelCase ) == 1: break else: lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase ) lowerCamelCase__ : Dict = word return word def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = [] for token in re.findall(self.pat , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in 
self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Any , __lowerCamelCase : int ): '''simple docstring''' return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' return self.decoder.get(__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase ) lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) lowerCamelCase__ : Tuple = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" 
) lowerCamelCase__ : List[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] lowerCamelCase__ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Any = [self.sep_token_id] lowerCamelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): lowerCamelCase__ : Dict = " " + text return (text, kwargs) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' lowerCamelCase__ : str = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase__ : str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase__ : Optional[int] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
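# A minimal usage sketch for the `_pad` override above, assuming the class is
# exposed as `transformers.LEDTokenizer` (model id as referenced in the file);
# the input strings are illustrative, not from the source.
#
#     from transformers import LEDTokenizer
#
#     tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     enc = tok(["short text", "a somewhat longer piece of text"])
#     # Request global attention on the first token of each sequence (1 = global).
#     enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#     padded = tok.pad(enc, padding="longest")
#     # `_pad` extends `global_attention_mask` with -1 rather than 0, because 0
#     # already means "local attention" for LED.
#     print(padded["global_attention_mask"])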
5
0
def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
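# A short sanity check for the function above; the sample values are
# illustrative, not from the source.
assert pancake_sort([3, 1, 5, 2, 4]) == [1, 2, 3, 4, 5]
assert pancake_sort([]) == []  # empty input: the while loop never runs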
700
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image"] A__ = [ "image_embeds", "negative_image_embeds", "image", ] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.dummy_unet lowerCamelCase__ : Optional[Any] = self.dummy_movq lowerCamelCase__ : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , 
__lowerCamelCase : int=0 ): '''simple docstring''' lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = "cpu" lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : Optional[Any] = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : str = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : Any = "A red cartoon frog, 4k" lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) lowerCamelCase__ : str = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = 
torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Optional[Any] = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) lowerCamelCase__ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
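# The class names in this dump are mangled: `KandinskyVaaImgaImgPipeline` stands
# for diffusers' `KandinskyV22Img2ImgPipeline` and `KandinskyVaaPriorPipeline`
# for `KandinskyV22PriorPipeline`. A minimal inference sketch with the real
# names, mirroring the slow test above (requires a CUDA device):
#
#     import torch
#     from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
#     from diffusers.utils import load_image
#
#     prior = KandinskyV22PriorPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#     ).to("cuda")
#     pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
#     ).to("cuda")
#
#     init_image = load_image(
#         "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
#     )
#     image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
#     frog = pipe(
#         image=init_image,
#         image_embeds=image_embeds,
#         negative_image_embeds=negative_image_embeds,
#         height=768,
#         width=768,
#         strength=0.2,
#     ).images[0]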
5
0
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : neighbourhood size
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[np.ndarray, list[list[int]]]:
        """Returns the image with corners identified and the corner positions."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
701
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
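# Two illustrative checks for the XOR helper above (values chosen here, not
# from the source):
assert binary_xor(25, 32) == "0b111001"  # 011001 ^ 100000
assert binary_xor(37, 50) == "0b010111"  # 100101 ^ 110010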
5
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
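# With the lazy structure above in place, importing from the package stays
# cheap until a name is first resolved; a typical access pattern (standard
# transformers API) would be:
#
#     from transformers import ResNetConfig, ResNetModel
#
#     # The modeling submodule is only materialized on this first access.
#     model = ResNetModel(ResNetConfig())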
702
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
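# Usage sketch: transformers calls this helper lazily the first time Deformable
# DETR needs its custom attention op; compiling requires a working CUDA
# toolchain. A one-off manual build would be:
#
#     MSDA = load_cuda_kernels()  # returns the compiled extension module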
5
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
703
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
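# The script is meant to run on a schedule (e.g. a CI cron job). A minimal
# local invocation sketch, assuming a token with `repo` scope (the token value
# is a placeholder):
#
#     $ GITHUB_TOKEN=<token> python stale.py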
5
0
def solution(n: int = 2000000) -> int:
    """Return the sum of all primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 = prime candidate, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
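# A quick worked example of the sieve above: the primes below 10 are
# 2, 3, 5 and 7, so:
assert solution(10) == 17  # 2 + 3 + 5 + 7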
704
from __future__ import annotations def lowercase_ ( _A : str , _A : list[str] | None = None , _A : dict[str, float] | None = None , _A : bool = False , ): """simple docstring""" lowerCamelCase__ : Tuple = cipher_alphabet or [chr(_A ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowerCamelCase__ : Dict = { "a": 0.08_497, "b": 0.01_492, "c": 0.02_202, "d": 0.04_253, "e": 0.11_162, "f": 0.02_228, "g": 0.02_015, "h": 0.06_094, "i": 0.07_546, "j": 0.00_153, "k": 0.01_292, "l": 0.04_025, "m": 0.02_406, "n": 0.06_749, "o": 0.07_507, "p": 0.01_929, "q": 0.00_095, "r": 0.07_587, "s": 0.06_327, "t": 0.09_356, "u": 0.02_758, "v": 0.00_978, "w": 0.02_560, "x": 0.00_150, "y": 0.01_994, "z": 0.00_077, } else: # Custom frequencies dictionary lowerCamelCase__ : Optional[int] = frequencies_dict if not case_sensitive: lowerCamelCase__ : str = ciphertext.lower() # Chi squared statistic values lowerCamelCase__ : dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(_A ) ): lowerCamelCase__ : Optional[Any] = "" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowerCamelCase__ : Dict = (alphabet_letters.index(letter.lower() ) - shift) % len( _A ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowerCamelCase__ : str = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowerCamelCase__ : List[str] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message lowerCamelCase__ : List[str] = decrypted_with_shift.lower().count(_A ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCamelCase__ : List[Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCamelCase__ : str = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowerCamelCase__ : Any = decrypted_with_shift.count(_A ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCamelCase__ : str = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCamelCase__ : int = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowerCamelCase__ : Optional[int] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_A : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowerCamelCase__ : int = min( _A , key=_A , ) # Get all the data from the most likely cipher (key, decoded message) ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : int = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, 
most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
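# A usage sketch for the decrypt routine above. Its name was lost to the
# identifier mangling in this dump (`lowercase_`); `decrypt_caesar_with_chi_squared`
# is an assumed binding for it. The ciphertext here is "the quick brown fox"
# shifted by 10; note that for very short texts the chi-squared statistic can
# pick the wrong shift.
#
#     shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("dro aesmu lbygx pyh")
#     print(shift, decoded)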
5
0
from manim import * class _lowercase ( lowercase__): """simple docstring""" def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[str] = Rectangle(height=0.5 , width=0.5 ) lowerCamelCase__ : List[str] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) lowerCamelCase__ : Optional[int] = [mem.copy() for i in range(6 )] lowerCamelCase__ : Dict = [mem.copy() for i in range(6 )] lowerCamelCase__ : Union[str, Any] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) lowerCamelCase__ : Optional[Any] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) lowerCamelCase__ : Optional[Any] = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) lowerCamelCase__ : Optional[Any] = Text("CPU" , font_size=24 ) lowerCamelCase__ : int = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = [mem.copy() for i in range(4 )] lowerCamelCase__ : List[Any] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) lowerCamelCase__ : Optional[Any] = Text("GPU" , font_size=24 ) lowerCamelCase__ : int = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = [mem.copy() for i in range(6 )] lowerCamelCase__ : int = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) lowerCamelCase__ : Any = Text("Model" , font_size=24 ) lowerCamelCase__ : List[str] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = [] for i, rect in enumerate(__lowerCamelCase ): rect.set_stroke(__lowerCamelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) lowerCamelCase__ : Union[str, Any] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__lowerCamelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 ) self.add(__lowerCamelCase ) cpu_targs.append(__lowerCamelCase ) lowerCamelCase__ : Dict = [mem.copy() for i in range(6 )] lowerCamelCase__ : str = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) lowerCamelCase__ : List[Any] = Text("Loaded Checkpoint" , font_size=24 ) lowerCamelCase__ : Any = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) lowerCamelCase__ : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase__ : Union[str, Any] = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Any = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) lowerCamelCase__ : Optional[int] = MarkupText( f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model 
is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) ) self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) ) lowerCamelCase__ : str = [] lowerCamelCase__ : Tuple = [] for i, rect in enumerate(__lowerCamelCase ): lowerCamelCase__ : int = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 ) target.move_to(__lowerCamelCase ) first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) ) lowerCamelCase__ : Optional[int] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) ) self.play(*__lowerCamelCase ) self.play(*__lowerCamelCase ) self.wait()
705
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
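# Illustrative checks for the automorphic test above:
assert is_automorphic_number(25)  # 25**2 == 625, which ends in 25
assert is_automorphic_number(76)  # 76**2 == 5776
assert not is_automorphic_number(7)  # 7**2 == 49 does not end in 7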
5
0
import os


def solution(filename: str = "input.txt") -> int:
    """Find the minimal path sum from the left column to the right column of the
    matrix in filename, moving up, down and right."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
706
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
5
0
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _lowercase ( datasets.Metric): """simple docstring""" def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : str = compute_bleu( reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase ) (lowerCamelCase__) : List[str] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
707
from __future__ import annotations import time import numpy as np A : Dict = [8, 5, 9, 7] A : Optional[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A : Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _lowercase : """simple docstring""" def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ): '''simple docstring''' lowerCamelCase__ : int = claim_vector lowerCamelCase__ : str = allocated_resources_table lowerCamelCase__ : int = maximum_claim_table def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.__need() lowerCamelCase__ : str = self.__allocated_resources_table lowerCamelCase__ : List[Any] = self.__available_resources() lowerCamelCase__ : str = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: lowerCamelCase__ : int = False for each_need in need_list: lowerCamelCase__ : Dict = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: lowerCamelCase__ : str = False break if execution: lowerCamelCase__ : Tuple = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowerCamelCase__ : Any = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
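# A usage sketch: the obfuscated class `_lowercase` above is a Banker's-algorithm
# safety check. Assuming its original name `BankersAlgorithm` and descriptive
# names for the three module-level tables (both assumptions, since the dump
# mangled them), a run would look like:
#
#     BankersAlgorithm(
#         claim_vector,               # total resources per type
#         allocated_resources_table,  # currently held per process
#         maximum_claim_table,        # maximum demand per process
#     ).main(describe=True)           # prints the tables, then the execution order / safety verdict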
5
0
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
708
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = BarthezTokenizer A__ = BarthezTokenizerFast A__ = True A__ = True def lowerCAmelCase ( self : int ): '''simple docstring''' super().setUp() lowerCamelCase__ : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Any = "<pad>" lowerCamelCase__ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 101122 ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase__ : str = [0, 57, 3018, 70307, 91, 2] lowerCamelCase__ : Tuple = self.tokenizer( __lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCamelCase__ : Any = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Tuple = self.get_rust_tokenizer() lowerCamelCase__ : Union[str, Any] = "I was born in 92000, and this is falsé." 
lowerCamelCase__ : Dict = tokenizer.tokenize(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = self.get_rust_tokenizer() lowerCamelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCamelCase__ : List[str] = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
5
0
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging A : Union[str, Any] = logging.get_logger(__name__) def lowercase_ ( _A : int ): """simple docstring""" lowerCamelCase__ : Tuple = r"\w+[.]\d+" lowerCamelCase__ : Optional[Any] = re.findall(_A , _A ) for pat in pats: lowerCamelCase__ : Tuple = key.replace(_A , "_".join(pat.split("." ) ) ) return key def lowercase_ ( _A : Any , _A : int , _A : List[str] ): """simple docstring""" lowerCamelCase__ : Tuple = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCamelCase__ : Tuple = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCamelCase__ : Optional[int] = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCamelCase__ : Dict = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase__ : Optional[int] = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCamelCase__ : List[str] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase__ : str = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCamelCase__ : Optional[int] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowercase_ ( _A : Any , _A : str , _A : str=42 ): """simple docstring""" lowerCamelCase__ : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCamelCase__ : List[Any] = flax_model.init_weights(PRNGKey(_A ) ) lowerCamelCase__ : int = flatten_dict(_A ) lowerCamelCase__ : Union[str, Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ : Any = rename_key(_A ) lowerCamelCase__ : List[str] = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCamelCase__ : List[str] = rename_key_and_reshape_tensor(_A , _A , _A ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCamelCase__ : Optional[Any] = jnp.asarray(_A ) return unflatten_dict(_A )
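# A conversion sketch. In diffusers the obfuscated `lowercase_` entry point
# above is `convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model,
# init_key=42)`; that binding is assumed here, and `pt_model` / `flax_model`
# are illustrative stand-ins for a matching PyTorch module and Flax module
# (the latter exposing the `init_weights(rng)` method the converter expects).
#
#     flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)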
709
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : neighbourhood size
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[np.ndarray, list[list[int]]]:
        """Returns the image with corners identified and the corner positions."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
5
0
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class _lowercase : """simple docstring""" def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : int=7 , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]=33 , __lowerCamelCase : Any=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[int]=512 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : str=2 , __lowerCamelCase : Tuple=0.0_2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=None , ): '''simple docstring''' lowerCamelCase__ : str = parent lowerCamelCase__ : List[str] = batch_size lowerCamelCase__ : Union[str, Any] = seq_length lowerCamelCase__ : Tuple = is_training lowerCamelCase__ : Any = use_input_mask lowerCamelCase__ : Union[str, Any] = use_token_type_ids lowerCamelCase__ : Any = use_labels lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : List[str] = hidden_size lowerCamelCase__ : Dict = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Dict = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : Tuple = hidden_dropout_prob lowerCamelCase__ : int = attention_probs_dropout_prob lowerCamelCase__ : Dict = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : int = type_sequence_label_size lowerCamelCase__ : List[Any] = initializer_range lowerCamelCase__ : Any = num_labels lowerCamelCase__ : int = num_choices lowerCamelCase__ : int = scope def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Dict = None if self.use_input_mask: lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : List[str] = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : Any , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = EsmModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) lowerCamelCase__ : Any = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : str = EsmForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.num_labels lowerCamelCase__ : Optional[int] = EsmForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.prepare_config_and_inputs() ( lowerCamelCase__ ) : Any = config_and_inputs lowerCamelCase__ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = False A__ = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) A__ = () A__ = ( { "feature-extraction": EsmModel, "fill-mask": EsmForMaskedLM, "text-classification": EsmForSequenceClassification, "token-classification": EsmForTokenClassification, "zero-shot": EsmForSequenceClassification, } if is_torch_available() else {} ) A__ = True def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = EsmModelTester(self ) lowerCamelCase__ : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ : List[Any] = type self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Optional[Any] = EsmModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase__ : List[str] = EsmEmbeddings(config=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowerCamelCase__ : Dict = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowerCamelCase__ : Any = create_position_ids_from_input_ids(__lowerCamelCase , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__lowerCamelCase , __lowerCamelCase ) ) ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase__ : Any = EsmEmbeddings(config=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = torch.empty(2 , 4 , 30 ) lowerCamelCase__ : Union[str, Any] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowerCamelCase__ : Optional[int] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowerCamelCase__ : Union[str, Any] = embeddings.create_position_ids_from_inputs_embeds(__lowerCamelCase ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__lowerCamelCase , __lowerCamelCase ) ) ) @unittest.skip("Esm does not support embedding resizing" ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip("Esm does not support embedding resizing" ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' pass @require_torch class _lowercase ( lowercase__): """simple docstring""" @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' with torch.no_grad(): lowerCamelCase__ : Optional[Any] = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) model.eval() lowerCamelCase__ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ : int = model(__lowerCamelCase )[0] lowerCamelCase__ : Dict = 33 lowerCamelCase__ : int = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __lowerCamelCase ) lowerCamelCase__ : Any = torch.tensor( [[[8.9_2_1_5, -10.5898, -6.4_6_7_1], [-6.3_9_6_7, -13.9114, -1.1_2_1_2], [-7.7_8_1_2, -13.9516, -3.7_4_0_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' with torch.no_grad(): lowerCamelCase__ : int = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) model.eval() lowerCamelCase__ : Optional[int] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )[0] # compare the actual values for a slice. lowerCamelCase__ : Optional[Any] = torch.tensor( [[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
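# A minimal re-implementation sketch of the position-id logic exercised by the
# test above: padding positions keep padding_idx, real tokens count upwards
# from padding_idx + 1. It mirrors how the test expects
# create_position_ids_from_input_ids to behave; it is illustrative, not the
# library source.
import torch


def position_ids_sketch(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx


ids = torch.as_tensor([[12, 31, 13, 1]])  # 1 is the pad id in the test config
print(position_ids_sketch(ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])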
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ): '''simple docstring''' lowerCamelCase__ : Dict = parent lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Any = seq_length lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : int = use_input_mask lowerCamelCase__ : List[str] = use_token_type_ids lowerCamelCase__ : int = use_labels lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : List[Any] = embedding_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_hidden_groups lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase__ : Optional[int] = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : Optional[Any] = type_sequence_label_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : str = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Any = scope def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[int] = None if self.use_input_mask: lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : List[str] = None lowerCamelCase__ : int = None if self.use_labels: lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : str ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : str = model( __lowerCamelCase , 
attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.num_labels lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.num_choices lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : int = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : int = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Union[str, Any] = config_and_inputs lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A__ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, 
"question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A__ = True def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase ) lowerCamelCase__ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = AlbertModelTester(self ) lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ : Dict = type self.model_tester.create_and_check_model(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" ) lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(__lowerCamelCase , 
attention_mask=__lowerCamelCase )[0] lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) lowerCamelCase__ : Dict = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
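# A short sketch of the multiple-choice input expansion used in the test
# above: each (batch, seq) tensor is repeated once per answer choice so the
# model receives shape (batch, num_choices, seq). The sizes are illustrative.
import torch

batch_size, num_choices, seq_length = 2, 4, 7
input_ids = torch.randint(0, 99, (batch_size, seq_length))
multiple_choice_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
print(multiple_choice_ids.shape)  # torch.Size([2, 4, 7])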
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaControlnetImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image", "hint"] A__ = ["image_embeds", "negative_image_embeds", "image", "hint"] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : str ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : int ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Any ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Any = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : str = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Any = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Dict = self.dummy_unet lowerCamelCase__ : Union[str, Any] = self.dummy_movq lowerCamelCase__ : Optional[Any] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Dict = { "unet": unet, "scheduler": scheduler, "movq": 
movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any]=0 ): '''simple docstring''' lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) # create hint lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : List[str] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : str = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : str = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Any = "cpu" lowerCamelCase__ : Any = self.get_dummy_components() lowerCamelCase__ : Union[str, Any] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : str = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : str = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : int = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1] lowerCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : Any = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : List[str] = init_image.resize((512, 512) ) lowerCamelCase__ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) lowerCamelCase__ : Optional[Any] = 
torch.from_numpy(np.array(__lowerCamelCase ) ).float() / 255.0 lowerCamelCase__ : Optional[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) lowerCamelCase__ : Optional[Any] = "A robot, 4k photo" lowerCamelCase__ : str = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) lowerCamelCase__ : List[Any] = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : str = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , image=__lowerCamelCase , strength=0.8_5 , generator=__lowerCamelCase , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Dict = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , hint=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , ) lowerCamelCase__ : List[Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
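# A minimal sketch of the hint preprocessing done in the slow test above: an
# RGB image array is scaled to [0, 1] and reshaped from HWC to NCHW. The
# random array stands in for the depth-hint image the test downloads.
import numpy as np
import torch

hint_array = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
hint = torch.from_numpy(hint_array).float() / 255.0  # HWC in [0, 1]
hint = hint.permute(2, 0, 1).unsqueeze(0)            # -> (1, 3, H, W)
print(hint.shape)  # torch.Size([1, 3, 64, 64])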
import os


def solution(filename: str = "input.txt") -> int:
    """
    Returns the minimal path sum through the matrix in ``filename``, moving
    only right, up and down, starting in any cell of the left column and
    finishing in any cell of the right column.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # first pass: arrive from the left
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # second pass: allow moving down within the column
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # third pass: allow moving up within the column
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
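# A worked mini-example of the dynamic programme in solution() above, run on
# an in-memory 3x3 matrix instead of input.txt. The matrix values are
# illustrative; the cheapest right/up/down route here is 201 -> 96 -> 342 = 639.
def minimal_path_sum(matrix: list[list[int]]) -> int:
    rows, cols = len(matrix), len(matrix[0])
    sums = [[-1] * cols for _ in range(rows)]
    for i in range(rows):
        sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            sums[i][j] = sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            sums[i][j] = min(sums[i][j], sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            sums[i][j] = min(sums[i][j], sums[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in sums)


print(minimal_path_sum([[131, 673, 234], [201, 96, 342], [630, 803, 746]]))  # 639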
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: A : int = None A : Optional[int] = logging.get_logger(__name__) A : int = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} A : Tuple = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", }, "tokenizer_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json", }, } A : Dict = { "xlnet-base-cased": None, "xlnet-large-cased": None, } A : List[Any] = "▁" # Segments (not really needed) A : Tuple = 0 A : int = 1 A : Dict = 2 A : List[str] = 3 A : Optional[Any] = 4 class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = "left" A__ = XLNetTokenizer def __init__( self : int , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Dict=False , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : Optional[int]="<sep>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Tuple="<cls>" , __lowerCamelCase : Tuple="<mask>" , __lowerCamelCase : int=["<eop>", "<eod>"] , **__lowerCamelCase : Union[str, Any] , ): '''simple docstring''' lowerCamelCase__ : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) lowerCamelCase__ : Optional[int] = 3 lowerCamelCase__ : List[str] = do_lower_case lowerCamelCase__ : List[str] = remove_space lowerCamelCase__ : Any = keep_accents lowerCamelCase__ : int = vocab_file lowerCamelCase__ : str = False if not self.vocab_file else True def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Any = [self.sep_token_id] lowerCamelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : List[str] = [self.sep_token_id] lowerCamelCase__ : List[Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def 
lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file , __lowerCamelCase ) return (out_vocab_file,)
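# A small sketch of the XLNet special-token layout produced by
# build_inputs_with_special_tokens above: XLNet appends <sep> and <cls> at the
# END of the sequence (unlike BERT's leading [CLS]). The token id values are
# placeholders, not the real sep_token_id / cls_token_id.
sep, cls = [901], [902]  # stand-ins for [self.sep_token_id], [self.cls_token_id]
tokens_a, tokens_b = [1, 2, 3], [4, 5]

single = tokens_a + sep + cls                 # A <sep> <cls>
pair = tokens_a + sep + tokens_b + sep + cls  # A <sep> B <sep> <cls>
print(single)  # [1, 2, 3, 901, 902]
print(pair)    # [1, 2, 3, 901, 4, 5, 901, 902]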
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _lowercase ( datasets.Metric): """simple docstring""" def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : str = compute_bleu( reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : List[str] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
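# A minimal sketch of the brevity penalty behind the 'brevity_penalty' field
# returned above: BP = 1 when the candidate is longer than the reference,
# otherwise exp(1 - ref_len / cand_len). The lengths are illustrative.
import math


def brevity_penalty(candidate_len: int, reference_len: int) -> float:
    if candidate_len > reference_len:
        return 1.0
    return math.exp(1 - reference_len / candidate_len)


print(brevity_penalty(9, 10))   # ~0.8948, short candidates are penalised
print(brevity_penalty(12, 10))  # 1.0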
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : int , __lowerCamelCase : Any ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ): lowerCamelCase__ : List[Any] = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Any = "sshleifer/tiny-gpt2" lowerCamelCase__ : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowerCamelCase__ : List[Any] = PyTorchBenchmark(__lowerCamelCase ) lowerCamelCase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : int = "sgugger/tiny-distilbert-classification" lowerCamelCase__ : List[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , ) lowerCamelCase__ : Optional[Any] = PyTorchBenchmark(__lowerCamelCase ) lowerCamelCase__ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : List[str] = "sshleifer/tiny-gpt2" lowerCamelCase__ : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowerCamelCase__ : str = PyTorchBenchmark(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : str = "sshleifer/tiny-gpt2" lowerCamelCase__ : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowerCamelCase__ : Optional[int] = PyTorchBenchmark(__lowerCamelCase ) lowerCamelCase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = "sshleifer/tiny-gpt2" lowerCamelCase__ : Any = AutoConfig.from_pretrained(__lowerCamelCase ) # set architectures equal to `None` lowerCamelCase__ : Any = None lowerCamelCase__ : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , 
batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowerCamelCase__ : Optional[int] = PyTorchBenchmark(__lowerCamelCase , configs=[config] ) lowerCamelCase__ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : str = "sshleifer/tiny-gpt2" lowerCamelCase__ : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowerCamelCase__ : Optional[Any] = PyTorchBenchmark(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision" ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Tuple = "sshleifer/tiny-gpt2" lowerCamelCase__ : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , ) lowerCamelCase__ : Any = PyTorchBenchmark(__lowerCamelCase ) lowerCamelCase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = "sshleifer/tiny-gpt2" lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowerCamelCase__ : List[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config] ) lowerCamelCase__ : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = "sshleifer/tinier_bart" lowerCamelCase__ : Dict = AutoConfig.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowerCamelCase__ : str = PyTorchBenchmark(__lowerCamelCase , configs=[config] ) lowerCamelCase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = "sshleifer/tiny-gpt2" lowerCamelCase__ : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowerCamelCase__ : Any = PyTorchBenchmark(__lowerCamelCase , configs=[config] ) lowerCamelCase__ : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowerCAmelCase ( self : Any ): '''simple 
docstring''' lowerCamelCase__ : Union[str, Any] = "sshleifer/tinier_bart" lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowerCamelCase__ : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config] ) lowerCamelCase__ : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv" ) , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv" ) , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv" ) , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv" ) , multi_process=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv" ) ).exists() ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(__lowerCamelCase : str ): self.assertTrue(hasattr(__lowerCamelCase , "sequential" ) ) self.assertTrue(hasattr(__lowerCamelCase , "cumulative" ) ) self.assertTrue(hasattr(__lowerCamelCase , "current" ) ) self.assertTrue(hasattr(__lowerCamelCase , "total" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt" ) , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , ) lowerCamelCase__ : str = PyTorchBenchmark(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt" ) ).exists() )
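# A hedged usage sketch of the benchmark utilities exercised above, outside a
# test harness. The model name and sizes are illustrative, and running this
# downloads the checkpoint.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)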
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
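# A small sketch of safer query construction than the raw string concatenation
# above: percent-encode the user's terms before building the search URL. The
# query terms are illustrative.
from urllib.parse import quote_plus

terms = ["harris", "corner", "detector"]
url = "https://www.google.com/search?q=" + quote_plus(" ".join(terms))
print(url)  # https://www.google.com/search?q=harris+corner+detector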
import os


def solution() -> int:
    """Returns the total of all the name scores in p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score


if __name__ == "__main__":
    print(solution())
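# A worked example of the scoring rule in solution() above: each letter
# contributes ord(letter) - 64 (so A=1 ... Z=26), and the name's alphabetical
# rank multiplies the letter total. "COLIN" at rank 938 is the example from
# the original Project Euler statement.
name, rank = "COLIN", 938
letter_total = sum(ord(ch) - 64 for ch in name)  # 3 + 15 + 12 + 9 + 14 = 53
print(letter_total * rank)  # 49714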
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
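# A minimal sketch of the slice comparison used in the test above:
# np.allclose with atol=1e-4 treats two activations as equal when every
# element differs by at most roughly 1e-4. The arrays are illustrative.
import numpy as np

a = np.array([[-0.0254, 0.0235, 0.1027]])
b = a + 5e-5  # within the absolute tolerance
print(np.allclose(a, b, atol=1e-4))  # True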
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
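# A generic sketch of the deprecation-shim pattern used above: the old class
# subclasses its replacement and only adds a FutureWarning, so existing
# imports keep working unchanged. The class names here are illustrative.
import warnings


class NewProcessor:
    def __init__(self, *args, **kwargs):
        self.args = args


class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


OldProcessor()  # emits a FutureWarning, otherwise behaves like NewProcessor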
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _lowercase ( lowercase__): """simple docstring""" A__ = "blenderbot-small" A__ = ["past_key_values"] A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' lowerCamelCase__ : str = vocab_size lowerCamelCase__ : Union[str, Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Optional[int] = encoder_ffn_dim lowerCamelCase__ : Dict = encoder_layers lowerCamelCase__ : Any = encoder_attention_heads lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim lowerCamelCase__ : str = decoder_layers lowerCamelCase__ : Optional[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : Optional[Any] = activation_function lowerCamelCase__ : Dict = init_std lowerCamelCase__ : List[str] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : int = use_cache lowerCamelCase__ : List[Any] = encoder_layers lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ : Union[str, Any] = {0: "batch"} lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"} if self.use_past: 
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Union[str, Any] = super().outputs else: lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1] lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads lowerCamelCase__ : str = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[int] = decoder_seq_length + 3 lowerCamelCase__ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : List[Any] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase__ : str = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype lowerCamelCase__ : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Tuple = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) lowerCamelCase__ : Dict = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
5
0
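Editor's usage sketch (not part of the dataset row above): a minimal, self-contained illustration of the deprecation-shim pattern used by the DonutFeatureExtractor code in this row, where the old class only emits a FutureWarning and defers to its replacement. The class names below are hypothetical stand-ins, not transformers API.

import warnings


class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        # Warn once at construction time, then behave exactly like the new class.
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


OldFeatureExtractor(size=224)  # emits a FutureWarning, then forwards to NewImageProcessor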
import pytest A : Tuple = "__dummy_dataset1__" A : str = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def lowercase_ ( ): """simple docstring""" return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def lowercase_ ( ): """simple docstring""" return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def lowercase_ ( _A : Optional[Any] , _A : List[str] , _A : Dict ): """simple docstring""" lowerCamelCase__ : int = dataset_loading_script_name lowerCamelCase__ : Optional[Any] = tmp_path / "datasets" / script_name script_dir.mkdir(parents=_A ) lowerCamelCase__ : int = script_dir / F"{script_name}.py" with open(_A , "w" ) as f: f.write(_A ) return str(_A )
716
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : int = logging.get_logger(__name__) A : Optional[int] = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "xmod" def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : List[Any] = attention_probs_dropout_prob lowerCamelCase__ : Any = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : Union[str, Any] = position_embedding_type lowerCamelCase__ : str = use_cache lowerCamelCase__ : Union[str, Any] = classifier_dropout lowerCamelCase__ : Any = pre_norm lowerCamelCase__ : Tuple = adapter_reduction_factor lowerCamelCase__ : Tuple = adapter_layer_norm lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm lowerCamelCase__ : Dict = ln_before_adapter lowerCamelCase__ : List[Any] = list(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = default_language class _lowercase ( 
lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
0
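Editor's usage sketch (assumption-laden, not part of the row above): how the dataset-loading-script fixtures in this row are typically consumed. pytest injects fixtures by parameter name; the names `dataset_loading_script_name` and `dataset_loading_script_dir` are assumed from the upstream test suite, since the obfuscated row names every fixture `lowercase_`, and the directory fixture is assumed to return the folder containing "<script_name>.py".

import os


def test_dummy_loading_script_exists(dataset_loading_script_name, dataset_loading_script_dir):
    # The fixture in the row above writes "<script_name>.py" into a tmp_path
    # subdirectory; here we simply check that the file was created.
    expected = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
    assert os.path.isfile(expected)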
from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def lowercase_ ( _A : NDArray[floataa] , _A : NDArray[floataa] , _A : list[int] , _A : int , ): """simple docstring""" lowerCamelCase__ : Optional[int] = coefficient_matrix.shape lowerCamelCase__ : Union[str, Any] = constant_matrix.shape if rowsa != colsa: lowerCamelCase__ : Dict = F"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}" raise ValueError(_A ) if colsa != 1: lowerCamelCase__ : Optional[Any] = F"Constant matrix must be nx1 but received {rowsa}x{colsa}" raise ValueError(_A ) if rowsa != rowsa: lowerCamelCase__ : Union[str, Any] = ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " F"received {rowsa}x{colsa} and {rowsa}x{colsa}" ) raise ValueError(_A ) if len(_A ) != rowsa: lowerCamelCase__ : Union[str, Any] = ( "Number of initial values must be equal to number of rows in coefficient " F"matrix but received {len(_A )} and {rowsa}" ) raise ValueError(_A ) if iterations <= 0: raise ValueError("Iterations must be at least 1" ) lowerCamelCase__ : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) lowerCamelCase__ : Optional[Any] = table.shape strictly_diagonally_dominant(_A ) # Iterates the whole matrix for given number of times for _ in range(_A ): lowerCamelCase__ : Tuple = [] for row in range(_A ): lowerCamelCase__ : List[str] = 0 for col in range(_A ): if col == row: lowerCamelCase__ : Union[str, Any] = table[row][col] elif col == cols - 1: lowerCamelCase__ : Tuple = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] lowerCamelCase__ : str = (temp + val) / denom new_val.append(_A ) lowerCamelCase__ : Optional[Any] = new_val return [float(_A ) for i in new_val] def lowercase_ ( _A : NDArray[floataa] ): """simple docstring""" lowerCamelCase__ : Optional[Any] = table.shape lowerCamelCase__ : List[str] = True for i in range(0 , _A ): lowerCamelCase__ : Dict = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
717
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Any = use_token_type_ids lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : List[Any] = self.vocab_size - 1 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Any = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, 
sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.num_labels lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any = config_and_inputs lowerCamelCase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ = ( { "feature-extraction": OpenAIGPTModel, 
"text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): '''simple docstring''' lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Tuple = inputs_dict["labels"] lowerCamelCase__ : Any = inputs_dict["labels"] lowerCamelCase__ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCamelCase ) lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is lowerCamelCase__ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 
239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
5
0
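Editor's usage sketch (not part of the row above): a worked example for the Jacobi routine in this row, using a small strictly diagonally dominant system. `jacobi_iteration_method` is the upstream name of the obfuscated solver function and is an assumption here, so its call is left commented out.

import numpy as np

# Each diagonal entry dominates the rest of its row, so Jacobi iteration converges.
coefficient = np.array([[4.0, 1.0, 1.0],
                        [1.0, 5.0, 2.0],
                        [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
init_val = [0.5, -0.5, -0.5]

# x = jacobi_iteration_method(coefficient, constant, init_val, iterations=50)
# With enough iterations the result converges to the direct solution:
print(np.linalg.solve(coefficient, constant).ravel())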
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging A : Optional[Any] = logging.get_logger(__name__) class _lowercase ( lowercase__): """simple docstring""" A__ = ["input_values", "padding_mask"] def __init__( self : Any , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 24000 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : float = None , __lowerCamelCase : float = None , **__lowerCamelCase : List[Any] , ): '''simple docstring''' super().__init__(feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Tuple = chunk_length_s lowerCamelCase__ : List[str] = overlap @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self : str , __lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCamelCase : Optional[Union[bool, str, PaddingStrategy]] = None , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[int] = None , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one." 
) elif padding is None: # by default let's pad the inputs lowerCamelCase__ : int = True lowerCamelCase__ : str = bool( isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase__ : List[str] = [np.asarray(__lowerCamelCase , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ): lowerCamelCase__ : Dict = np.asarray(__lowerCamelCase , dtype=np.floataa ) elif isinstance(__lowerCamelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowerCamelCase__ : Any = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase__ : Any = [np.asarray(__lowerCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__lowerCamelCase ): if example.ndim > 2: raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}" ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels" ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels" ) lowerCamelCase__ : List[Any] = None lowerCamelCase__ : Union[str, Any] = BatchFeature({"input_values": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowerCamelCase__ : Optional[int] = min(array.shape[0] for array in raw_audio ) lowerCamelCase__ : Any = int(np.floor(max_length / self.chunk_stride ) ) lowerCamelCase__ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowerCamelCase__ : Optional[int] = max(array.shape[0] for array in raw_audio ) lowerCamelCase__ : Any = int(np.ceil(max_length / self.chunk_stride ) ) lowerCamelCase__ : Tuple = (nb_step - 1) * self.chunk_stride + self.chunk_length lowerCamelCase__ : Optional[Any] = "max_length" else: lowerCamelCase__ : Any = input_values # normal padding on batch if padded_inputs is None: lowerCamelCase__ : List[str] = self.pad( __lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , padding=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) if padding: lowerCamelCase__ : Dict = padded_inputs.pop("attention_mask" ) lowerCamelCase__ : Any = [] for example in padded_inputs.pop("input_values" ): if self.feature_size == 1: lowerCamelCase__ : str = example[..., None] input_values.append(example.T ) lowerCamelCase__ : str = input_values if return_tensors is not None: lowerCamelCase__ : Dict = padded_inputs.convert_to_tensors(__lowerCamelCase ) return padded_inputs
718
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Dict = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _lowercase ( lowercase__): """simple docstring""" A__ = "ibert" def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : Any = type_vocab_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : List[str] = quant_mode lowerCamelCase__ : int = force_dequant class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
0
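Editor's usage sketch (not part of the row above): the chunking arithmetic implemented by the `chunk_length` / `chunk_stride` properties of the feature extractor in this row, reproduced standalone. The 24 kHz rate matches the row's default sampling rate; the one-second chunk and 1% overlap are illustrative values, not defaults asserted by the source.

sampling_rate = 24_000        # default sampling rate from the row above
chunk_length_s = 1.0          # illustrative: one-second chunks
overlap = 0.01                # illustrative: 1% overlap between chunks

chunk_length = int(chunk_length_s * sampling_rate)           # -> 24000 samples per chunk
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))   # -> 23760-sample hop
print(chunk_length, chunk_stride)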
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A : Optional[int] = { "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"], "tokenization_roc_bert": ["RoCBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RoCBertForCausalLM", "RoCBertForMaskedLM", "RoCBertForMultipleChoice", "RoCBertForPreTraining", "RoCBertForQuestionAnswering", "RoCBertForSequenceClassification", "RoCBertForTokenClassification", "RoCBertLayer", "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert", ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
719
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Dict = logging.get_logger(__name__) A : Union[str, Any] = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "roberta" def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : List[Any] = vocab_size lowerCamelCase__ : str = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Tuple = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : int = type_vocab_size lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : Dict = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : Any = use_cache lowerCamelCase__ : int = classifier_dropout class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
0
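Editor's usage sketch (not part of the row above): what the `inputs` property of the encoder-style OnnxConfig subclasses in this section (I-BERT, RoBERTa, X-MOD) evaluates to for the default (non multiple-choice) task, shown standalone.

from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
inputs = OrderedDict([
    ("input_ids", dynamic_axis),
    ("attention_mask", dynamic_axis),
])
print(inputs)  # batch and sequence dims are marked dynamic for ONNX export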
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() A : int = logging.get_logger(__name__) A : Dict = ["model.decoder.embed_positions.weights"] def lowercase_ ( _A : Optional[Any] ): """simple docstring""" if "emb" in name: lowerCamelCase__ : Tuple = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: lowerCamelCase__ : Dict = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: lowerCamelCase__ : List[str] = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: lowerCamelCase__ : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: lowerCamelCase__ : List[str] = name.replace("linear2" , "fc2" ) if "norm1" in name: lowerCamelCase__ : Optional[Any] = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: lowerCamelCase__ : Optional[int] = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: lowerCamelCase__ : Tuple = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: lowerCamelCase__ : Tuple = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: lowerCamelCase__ : str = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: lowerCamelCase__ : List[str] = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase_ ( _A : OrderedDict , _A : int ): """simple docstring""" lowerCamelCase__ : int = list(state_dict.keys() ) lowerCamelCase__ : Optional[Any] = {} for key in keys: lowerCamelCase__ : Optional[Any] = state_dict.pop(_A ) lowerCamelCase__ : Any = rename_keys(_A ) if "in_proj_weight" in key: # split fused qkv proj lowerCamelCase__ : List[str] = val[:hidden_size, :] lowerCamelCase__ : str = val[hidden_size : 2 * hidden_size, :] lowerCamelCase__ : List[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: lowerCamelCase__ : str = val else: lowerCamelCase__ : Dict = val return state_dict, enc_dec_proj_state_dict def lowercase_ ( _A : str ): """simple docstring""" if checkpoint == "small": # default config values lowerCamelCase__ : Union[str, Any] = 1024 lowerCamelCase__ : List[Any] = 24 lowerCamelCase__ : int = 16 elif checkpoint == "medium": lowerCamelCase__ : Tuple = 1536 lowerCamelCase__ : Union[str, Any] = 48 lowerCamelCase__ : Optional[Any] = 24 elif checkpoint == "large": lowerCamelCase__ : Any = 2048 lowerCamelCase__ : Optional[Any] = 48 lowerCamelCase__ : Tuple = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." 
) lowerCamelCase__ : Tuple = MusicgenDecoderConfig( hidden_size=_A , ffn_dim=hidden_size * 4 , num_hidden_layers=_A , num_attention_heads=_A , ) return config @torch.no_grad() def lowercase_ ( _A : List[str] , _A : Any=None , _A : Dict=None , _A : str="cpu" ): """simple docstring""" lowerCamelCase__ : Dict = MusicGen.get_pretrained(_A , device=_A ) lowerCamelCase__ : Optional[Any] = decoder_config_from_checkpoint(_A ) lowerCamelCase__ : str = fairseq_model.lm.state_dict() lowerCamelCase__ : Optional[Any] = rename_state_dict( _A , hidden_size=decoder_config.hidden_size ) lowerCamelCase__ : Union[str, Any] = TaEncoderModel.from_pretrained("t5-base" ) lowerCamelCase__ : Dict = EncodecModel.from_pretrained("facebook/encodec_32khz" ) lowerCamelCase__ : List[Any] = MusicgenForCausalLM(_A ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection lowerCamelCase__ : Optional[int] = decoder.load_state_dict(_A , strict=_A ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(_A ) if len(_A ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(_A ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model lowerCamelCase__ : Optional[int] = MusicgenForConditionalGeneration(text_encoder=_A , audio_encoder=_A , decoder=_A ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(_A ) # check we can do a forward pass lowerCamelCase__ : Optional[int] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) lowerCamelCase__ : Any = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(input_ids=_A , decoder_input_ids=_A ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("t5-base" ) lowerCamelCase__ : Dict = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) lowerCamelCase__ : List[str] = MusicgenProcessor(feature_extractor=_A , tokenizer=_A ) # set the appropriate bos/pad token ids lowerCamelCase__ : Union[str, Any] = 2048 lowerCamelCase__ : Dict = 2048 # set other default generation config params lowerCamelCase__ : Optional[Any] = int(30 * audio_encoder.config.frame_rate ) lowerCamelCase__ : Dict = True lowerCamelCase__ : int = 3.0 if pytorch_dump_folder is not None: Path(_A ).mkdir(exist_ok=_A ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(_A ) processor.save_pretrained(_A ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(_A ) processor.push_to_hub(_A ) if __name__ == "__main__": A : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." 
) A : Optional[Any] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
720
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _lowercase : """simple docstring""" A__ = field( default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)}) A__ = field( default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) A__ = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) A__ = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}) A__ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"}) class _lowercase ( lowercase__): """simple docstring""" A__ = "train" A__ = "dev" class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = 42 A__ = 42 def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ): '''simple docstring''' lowerCamelCase__ : List[str] = args lowerCamelCase__ : Tuple = is_language_sensitive lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__lowerCamelCase , __lowerCamelCase ): try: lowerCamelCase__ : List[str] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowerCamelCase__ : str = mode # Load data features from cache or dataset file lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1" lowerCamelCase__ : List[str] = os.path.join( cache_dir if 
cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ : List[str] = cached_features_file + ".lock" with FileLock(__lowerCamelCase ): if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: lowerCamelCase__ : str = time.time() lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase__ : Optional[Any] = self.old_features["features"] lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir ) lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features( examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , ) lowerCamelCase__ : int = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.features[i] lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase__ : List[str] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
5
0
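Editor's usage sketch (not part of the row above): the fused q/k/v split performed inside the MusicGen conversion in this row, shown on a dummy tensor. A fused `in_proj_weight` of shape (3 * hidden, hidden) is sliced into three per-projection weights; the tensor values here are random placeholders.

import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

# Rows [0:h) are the query projection, [h:2h) the key, and the last h the value.
q_proj = in_proj_weight[:hidden_size, :]
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_proj = in_proj_weight[-hidden_size:, :]
assert q_proj.shape == k_proj.shape == v_proj.shape == (hidden_size, hidden_size)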
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder A : Optional[int] = datasets.utils.logging.get_logger(__name__) class _lowercase ( folder_based_builder.FolderBasedBuilderConfig): """simple docstring""" A__ = None A__ = None class _lowercase ( folder_based_builder.FolderBasedBuilder): """simple docstring""" A__ = datasets.Audio() A__ = "audio" A__ = AudioFolderConfig A__ = 42 # definition at the bottom of the script A__ = AudioClassification(audio_column="audio" , label_column="label") A : Any = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] A : Dict = AUDIO_EXTENSIONS
721
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging A : Tuple = logging.get_logger(__name__) A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED A : int = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } A : Union[str, Any] = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCamelCase__ : Any = bs[:] lowerCamelCase__ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 lowerCamelCase__ : Any = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = set() lowerCamelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : Any = char return pairs class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ["input_ids", "attention_mask"] def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase ) lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()} lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding lowerCamelCase__ : List[Any] = bytes_to_unicode() lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1] lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowerCamelCase__ : List[Any] = {} lowerCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase ) lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase ) if not pairs: return token while True: lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : int = 0 while i < len(__lowerCamelCase ): try: lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : List[str] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Dict = tuple(__lowerCamelCase ) lowerCamelCase__ : str = new_word if len(__lowerCamelCase ) == 1: break else: lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase ) lowerCamelCase__ : Dict = word return word def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = [] for token in re.findall(self.pat , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in 
self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Any , __lowerCamelCase : int ): '''simple docstring''' return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' return self.decoder.get(__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase ) lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) lowerCamelCase__ : Tuple = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" 
) lowerCamelCase__ : List[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] lowerCamelCase__ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Any = [self.sep_token_id] lowerCamelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): lowerCamelCase__ : Dict = " " + text return (text, kwargs) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' lowerCamelCase__ : str = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase__ : str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase__ : Optional[int] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
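# Usage sketch for the `_pad` override above, via the public transformers API. The
# checkpoint is the one referenced in the URL maps earlier in this file; putting a 1
# on the first token of `global_attention_mask` is a common convention, not a
# requirement:
#
#     from transformers import LEDTokenizer
#     tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     enc = tok(["short text", "a somewhat longer text"])
#     enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#     batch = tok.pad(enc, padding="longest")  # also pads global_attention_mask, using -1 as above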
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : """simple docstring""" def __init__( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Dict=[10, 20, 30, 40] , __lowerCamelCase : List[str]=[2, 2, 3, 2] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : str=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Dict=10 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : List[str]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Any=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : str = image_size lowerCamelCase__ : str = num_channels lowerCamelCase__ : Optional[Any] = num_stages lowerCamelCase__ : Optional[int] = hidden_sizes lowerCamelCase__ : Union[str, Any] = depths lowerCamelCase__ : Any = is_training lowerCamelCase__ : List[Any] = use_labels lowerCamelCase__ : Dict = intermediate_size lowerCamelCase__ : int = hidden_act lowerCamelCase__ : Tuple = type_sequence_label_size lowerCamelCase__ : Tuple = initializer_range lowerCamelCase__ : Tuple = out_features lowerCamelCase__ : str = num_labels lowerCamelCase__ : List[Any] = scope lowerCamelCase__ : int = num_stages def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : List[str] = None if self.use_labels: lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : str = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : int ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def lowerCAmelCase ( self : str ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=__lowerCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : Any = 
UperNetForSemanticSegmentation(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : int = model(__lowerCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : List[str] = self.prepare_config_and_inputs() ( lowerCamelCase__ ) : Optional[int] = config_and_inputs lowerCamelCase__ : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () A__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} A__ = False A__ = False A__ = False A__ = False A__ = False A__ = False def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : int = UperNetModelTester(self ) lowerCamelCase__ : Any = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : List[Any] = model_class(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Any = [*signature.parameters.keys()] lowerCamelCase__ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCamelCase ) @unittest.skip(reason="UperNet does not use inputs_embeds" ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason="UperNet does not support input and output embeddings" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="UperNet does not have a base model" ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip(reason="UperNet does not have a base model" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' def check_hidden_states_output(__lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ): lowerCamelCase__ : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) lowerCamelCase__ : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Optional[int] = _config_zero_init(__lowerCamelCase ) lowerCamelCase__ : Any = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowerCamelCase__ : Tuple = model_class(config=__lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @unittest.skip(reason="UperNet does not have tied weights" ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[Any] = UperNetForSemanticSegmentation.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def lowercase_ ( ): lowerCamelCase__ : Optional[int] = hf_hub_download( repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" ) lowerCamelCase__ : List[str] = Image.open(_A ).convert("RGB" ) return image @require_torch @require_vision @slow class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" ) lowerCamelCase__ : int = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(__lowerCamelCase ) lowerCamelCase__ : str = prepare_img() lowerCamelCase__ : Any = processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : str = model(**__lowerCamelCase ) lowerCamelCase__ : Any = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) lowerCamelCase__ : Optional[int] = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], 
[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1E-4 ) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" ) lowerCamelCase__ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = prepare_img() lowerCamelCase__ : str = processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : Union[str, Any] = model(**__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image"] A__ = [ "image_embeds", "negative_image_embeds", "image", ] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.dummy_unet lowerCamelCase__ : Optional[Any] = self.dummy_movq lowerCamelCase__ : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , 
__lowerCamelCase : int=0 ): '''simple docstring''' lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = "cpu" lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : Optional[Any] = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : str = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : Any = "A red cartoon frog, 4k" lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) lowerCamelCase__ : str = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = 
torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Optional[Any] = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) lowerCamelCase__ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
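# Usage note for the _LazyModule pattern above: importing the package only builds the
# name table; the torch-backed modeling module is imported on first attribute access.
#
#     from transformers import MegatronBertConfig   # cheap, configs need no backend
#     config = MegatronBertConfig()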
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a "0b"-prefixed binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
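# Worked example for binary_xor, checked by hand: 25 = 0b11001 and 32 = 0b100000, so the
# zero-padded strings 011001 and 100000 XOR to 111001.
if __name__ == "__main__":
    assert binary_xor(25, 32) == "0b111001"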
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
import os
from pathlib import Path


def load_cuda_kernels():
    """JIT-compile and load the MultiScaleDeformableAttention extension used by Deformable DETR."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
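# Usage sketch (assumes a CUDA toolchain compatible with the installed torch build): the
# first call JIT-compiles the extension, subsequent calls return the cached build.
#
#     MSDA = load_cuda_kernels()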
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""Wrapper around a list of `ControlNetModel`s so they can be used as a single model."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
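# Usage sketch: the wrapper is built from individual ControlNetModel instances (the
# checkpoint paths below are placeholders, not verified repositories):
#
#     canny = ControlNetModel.from_pretrained("path/to/controlnet-canny")
#     depth = ControlNetModel.from_pretrained("path/to/controlnet-depth")
#     multi = MultiControlNetModel([canny, depth])
#     # forward() then expects one conditioning image and one scale per ControlNet.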
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
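# Run sketch (assumes a token with `repo` scope; the script file name is hypothetical):
#
#     GITHUB_TOKEN=<token> python close_stale_issues.py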
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (logits or last_hidden_state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
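# Usage sketch (the checkpoint is an arbitrary example; any encoder checkpoint works):
#
#     from transformers import pipeline
#     extractor = pipeline("feature-extraction", model="bert-base-cased")
#     features = extractor("This is a test", return_tensors=True)  # shape (1, seq_len, hidden_size)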
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08_497,
            "b": 0.01_492,
            "c": 0.02_202,
            "d": 0.04_253,
            "e": 0.11_162,
            "f": 0.02_228,
            "g": 0.02_015,
            "h": 0.06_094,
            "i": 0.07_546,
            "j": 0.00_153,
            "k": 0.01_292,
            "l": 0.04_025,
            "m": 0.02_406,
            "n": 0.06_749,
            "o": 0.07_507,
            "p": 0.01_929,
            "q": 0.00_095,
            "r": 0.07_587,
            "s": 0.06_327,
            "t": 0.09_356,
            "u": 0.02_758,
            "v": 0.00_978,
            "w": 0.02_560,
            "x": 0.00_150,
            "y": 0.01_994,
            "z": 0.00_077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
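# Worked usage sketch for the decoder above: "hello" Caesar-shifted by 3 is "khoor". On
# such a short input the chi-squared statistic is noisy, so shift 3 is the expected
# rather than guaranteed answer:
#
#     shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("khoor")
#     # expected: shift == 3 and decoded == "hello"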
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def lowercase_ ( _A : Dict ): """simple docstring""" return EnvironmentCommand() def lowercase_ ( _A : List[Any] ): """simple docstring""" return EnvironmentCommand(args.accelerate_config_file ) class _lowercase ( lowercase__): """simple docstring""" @staticmethod def lowerCAmelCase ( __lowerCamelCase : ArgumentParser ): '''simple docstring''' lowerCamelCase__ : Any = parser.add_parser("env" ) download_parser.set_defaults(func=__lowerCamelCase ) download_parser.add_argument( "--accelerate-config_file" , default=__lowerCamelCase , help="The accelerate config file to use for the default values in the launching script." , ) download_parser.set_defaults(func=__lowerCamelCase ) def __init__( self : Dict , __lowerCamelCase : int , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : int = accelerate_config_file def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = "not installed" if is_safetensors_available(): import safetensors lowerCamelCase__ : Tuple = safetensors.__version__ elif importlib.util.find_spec("safetensors" ) is not None: import safetensors lowerCamelCase__ : str = f"{safetensors.__version__} but is ignored because of PyTorch version too old." lowerCamelCase__ : Tuple = "not installed" lowerCamelCase__ : List[str] = "not found" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file lowerCamelCase__ : Union[str, Any] = accelerate.__version__ # Get the default from the config file. 
if self._accelerate_config_file is not None or os.path.isfile(__lowerCamelCase ): lowerCamelCase__ : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict() lowerCamelCase__ : Any = ( "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()] ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else f"\t{accelerate_config}" ) lowerCamelCase__ : List[Any] = "not installed" lowerCamelCase__ : Optional[int] = "NA" if is_torch_available(): import torch lowerCamelCase__ : Tuple = torch.__version__ lowerCamelCase__ : Optional[Any] = torch.cuda.is_available() lowerCamelCase__ : List[str] = "not installed" lowerCamelCase__ : Dict = "NA" if is_tf_available(): import tensorflow as tf lowerCamelCase__ : List[str] = tf.__version__ try: # deprecated in v2.1 lowerCamelCase__ : Union[str, Any] = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool lowerCamelCase__ : Union[str, Any] = bool(tf.config.list_physical_devices("GPU" ) ) lowerCamelCase__ : int = "not installed" lowerCamelCase__ : List[Any] = "not installed" lowerCamelCase__ : int = "not installed" lowerCamelCase__ : Tuple = "NA" if is_flax_available(): import flax import jax import jaxlib lowerCamelCase__ : List[str] = flax.__version__ lowerCamelCase__ : Tuple = jax.__version__ lowerCamelCase__ : Tuple = jaxlib.__version__ lowerCamelCase__ : Any = jax.lib.xla_bridge.get_backend().platform lowerCamelCase__ : Union[str, Any] = { "`transformers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Huggingface_hub version": huggingface_hub.__version__, "Safetensors version": f"{safetensors_version}", "Accelerate version": f"{accelerate_version}", "Accelerate config": f"{accelerate_config_str}", "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})", "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})", "Jax version": f"{jax_version}", "JaxLib version": f"{jaxlib_version}", "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" ) print(self.format_dict(__lowerCamelCase ) ) return info @staticmethod def lowerCAmelCase ( __lowerCamelCase : int ): '''simple docstring''' return "\n".join([f"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
def is_automorphic_number(number: int) -> bool:
    """Return True when the digits of `number` reappear as the trailing digits of its square."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
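# Worked examples, checked by hand: 76**2 = 5776 ends in 76, while 7**2 = 49 does not end in 7.
if __name__ == "__main__":
    assert is_automorphic_number(76) is True
    assert is_automorphic_number(7) is False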
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset A : Tuple = "bert-base-cased" A : int = "google/pegasus-xsum" A : Optional[Any] = [" Sam ate lunch today.", "Sams lunch ingredients."] A : Any = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] A : int = "patrickvonplaten/t5-tiny-random" A : Optional[int] = "sshleifer/bart-tiny-random" A : Any = "sshleifer/tiny-mbart" A : Dict = "sshleifer/tiny-marian-en-de" def lowercase_ ( _A : Path , _A : list ): """simple docstring""" lowerCamelCase__ : Optional[Any] = "\n".join(_A ) Path(_A ).open("w" ).writelines(_A ) def lowercase_ ( _A : Union[str, Any] ): """simple docstring""" for split in ["train", "val", "test"]: _dump_articles(os.path.join(_A , F"{split}.source" ) , _A ) _dump_articles(os.path.join(_A , F"{split}.target" ) , _A ) return tmp_dir class _lowercase ( lowercase__): """simple docstring""" @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCamelCase__ : List[Any] = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in ARTICLES ) lowerCamelCase__ : List[Any] = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in SUMMARIES ) lowerCamelCase__ : Tuple = 4 lowerCamelCase__ : List[str] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCamelCase__ : int = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCamelCase__ : Optional[int] = SeqaSeqDataset( __lowerCamelCase , data_dir=__lowerCamelCase , type_path="train" , max_source_length=__lowerCamelCase , max_target_length=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , ) lowerCamelCase__ : List[Any] = DataLoader(__lowerCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(__lowerCamelCase , __lowerCamelCase ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCamelCase__ : List[str] = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Dict = AutoTokenizer.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCamelCase__ : Tuple = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in ARTICLES ) lowerCamelCase__ : Any = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in SUMMARIES ) lowerCamelCase__ : Optional[Any] = 4 lowerCamelCase__ : int = LegacySeqaSeqDataset( __lowerCamelCase , data_dir=__lowerCamelCase , type_path="train" , max_source_length=20 , max_target_length=__lowerCamelCase , ) lowerCamelCase__ : Tuple = DataLoader(__lowerCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : str = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCamelCase__ : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCamelCase__ : int = tmp_dir.joinpath("train.source" ).open().readlines() lowerCamelCase__ : str = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(__lowerCamelCase , __lowerCamelCase , 128 , __lowerCamelCase ) lowerCamelCase__ : Optional[int] = {x.name for x in tmp_dir.iterdir()} lowerCamelCase__ : List[Any] = {x.name for x in save_dir.iterdir()} lowerCamelCase__ : Union[str, Any] = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(__lowerCamelCase ) < len(__lowerCamelCase ) assert len(__lowerCamelCase ) == 1 assert len(packed_examples[0] ) == sum(len(__lowerCamelCase ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if not FAIRSEQ_AVAILABLE: return lowerCamelCase__ : Any = self._get_dataset(max_len=64 ) lowerCamelCase__ : int = 64 lowerCamelCase__ : int = ds.make_dynamic_sampler(__lowerCamelCase , required_batch_size_multiple=__lowerCamelCase ) lowerCamelCase__ : int = [len(__lowerCamelCase ) for x in batch_sampler] assert len(set(__lowerCamelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(__lowerCamelCase ) == 
len(__lowerCamelCase ) # no dropped or added examples lowerCamelCase__ : int = DataLoader(__lowerCamelCase , batch_sampler=__lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 ) lowerCamelCase__ : Dict = [] lowerCamelCase__ : Optional[int] = [] for batch in data_loader: lowerCamelCase__ : int = batch["input_ids"].shape lowerCamelCase__ : Dict = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCamelCase__ : Any = np.product(batch["input_ids"].shape ) num_src_per_batch.append(__lowerCamelCase ) if num_src_tokens > (max_tokens * 1.1): failures.append(__lowerCamelCase ) assert num_src_per_batch[0] == max(__lowerCamelCase ) if failures: raise AssertionError(f"too many tokens in {len(__lowerCamelCase )} batches" ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Any = self._get_dataset(max_len=512 ) lowerCamelCase__ : Optional[Any] = 2 lowerCamelCase__ : Any = ds.make_sortish_sampler(__lowerCamelCase , shuffle=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 ) lowerCamelCase__ : List[Any] = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__lowerCamelCase ) lowerCamelCase__ : Dict = tokenizer.pad_token_id def count_pad_tokens(__lowerCamelCase : int , __lowerCamelCase : List[str]="input_ids" ): return [batch[k].eq(__lowerCamelCase ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(__lowerCamelCase , k="labels" ) ) < sum(count_pad_tokens(__lowerCamelCase , k="labels" ) ) assert sum(count_pad_tokens(__lowerCamelCase ) ) < sum(count_pad_tokens(__lowerCamelCase ) ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ) def lowerCAmelCase ( self : str , __lowerCamelCase : Union[str, Any]=1000 , __lowerCamelCase : Union[str, Any]=128 ): '''simple docstring''' if os.getenv("USE_REAL_DATA" , __lowerCamelCase ): lowerCamelCase__ : Optional[int] = "examples/seq2seq/wmt_en_ro" lowerCamelCase__ : Optional[int] = max_len * 2 * 64 if not Path(__lowerCamelCase ).joinpath("train.len" ).exists(): save_len_file(__lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : Dict = "examples/seq2seq/test_data/wmt_en_ro" lowerCamelCase__ : List[Any] = max_len * 4 save_len_file(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : str = AutoTokenizer.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : int = SeqaSeqDataset( __lowerCamelCase , data_dir=__lowerCamelCase , type_path="train" , max_source_length=__lowerCamelCase , max_target_length=__lowerCamelCase , n_obs=__lowerCamelCase , ) return ds, max_tokens, tokenizer def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._get_dataset() lowerCamelCase__ : str = set(DistributedSortishSampler(__lowerCamelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__lowerCamelCase ) ) lowerCamelCase__ : Tuple = set(DistributedSortishSampler(__lowerCamelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__lowerCamelCase ) ) assert idsa.intersection(__lowerCamelCase ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def lowerCAmelCase ( self : int , __lowerCamelCase : str ): '''simple docstring''' lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase ) if tok_name == MBART_TINY: lowerCamelCase__ : Tuple = SeqaSeqDataset( __lowerCamelCase 
, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCamelCase__ : List[Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCamelCase__ : List[str] = SeqaSeqDataset( __lowerCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCamelCase__ : Optional[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(__lowerCamelCase ) == 1 if tok_name == BART_TINY else len(__lowerCamelCase ) == 0
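The assertions in the batching tests above check that a sortish sampler packs similar-length examples into the same batch, so batches carry fewer pad tokens than a naive sequential loader. A minimal sketch of the idea behind such a sampler (an illustrative helper, not the library's implementation): shuffle globally, then sort by length inside each shuffled mega-chunk.

import random


def sortish_indices(lengths: list[int], batch_size: int) -> list[int]:
    """Return indices mostly sorted by length while keeping some randomness.

    Sorting within shuffled chunks groups similar-length examples together,
    which is what makes the pad-token counts in the test above shrink.
    """
    indices = list(range(len(lengths)))
    random.shuffle(indices)
    chunk = batch_size * 50  # sort locally inside each shuffled mega-chunk
    return [
        i
        for lo in range(0, len(indices), chunk)
        for i in sorted(indices[lo : lo + chunk], key=lambda j: lengths[j], reverse=True)
    ]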
706
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) A : Optional[int] = { "configuration_speecht5": [ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], "processing_speecht5": ["SpeechT5Processor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = ["SpeechT5Tokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = [ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
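This init module (like the CLIP and Wav2Vec2 ones below) relies on transformers' _LazyModule, which defers the heavy framework imports until a symbol is first accessed. A minimal sketch of the same deferred-import pattern using a module-level __getattr__ (PEP 562); the package layout and symbol names here are made up for illustration:

# lazy_pkg/__init__.py -- sketch of deferred submodule imports
import importlib

_import_structure = {
    "config": ["MyConfig"],
    "model": ["MyModel"],  # only imported when first touched
}
_attr_to_module = {
    attr: mod for mod, attrs in _import_structure.items() for attr in attrs
}


def __getattr__(name: str):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")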
5
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A : List[Any] = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ["CLIPFeatureExtractor"] A : Any = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys A : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
707
from __future__ import annotations import time import numpy as np A : Dict = [8, 5, 9, 7] A : Optional[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A : Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _lowercase : """simple docstring""" def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ): '''simple docstring''' lowerCamelCase__ : int = claim_vector lowerCamelCase__ : str = allocated_resources_table lowerCamelCase__ : int = maximum_claim_table def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.__need() lowerCamelCase__ : str = self.__allocated_resources_table lowerCamelCase__ : List[Any] = self.__available_resources() lowerCamelCase__ : str = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: lowerCamelCase__ : int = False for each_need in need_list: lowerCamelCase__ : Dict = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: lowerCamelCase__ : str = False break if execution: lowerCamelCase__ : Tuple = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowerCamelCase__ : Any = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
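The class above implements the Banker's algorithm: a state is safe if every process can eventually run to completion using the currently available resources plus whatever earlier finishers release. Stripped of the printing, the safety check reduces to the following sketch (clean, hypothetical names):

def is_safe(available, allocated, need):
    """Banker's safety check: True if some execution order finishes all processes."""
    work = list(available)
    finished = [False] * len(allocated)
    progress = True
    while progress:
        progress = False
        for p, row in enumerate(need):
            if not finished[p] and all(n <= w for n, w in zip(row, work)):
                # process p can run to completion and release its allocation
                work = [w + a for w, a in zip(work, allocated[p])]
                finished[p] = True
                progress = True
    return all(finished)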
5
0
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ): '''simple docstring''' return f"gaussian_noise_s={seed}_shape={'_'.join([str(__lowerCamelCase ) for s in shape] )}.npy" def lowerCAmelCase ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() def lowerCAmelCase ( self : List[str] , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : List[Any]=(4, 4, 64, 64) , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa lowerCamelCase__ : Any = jnp.array(load_hf_numpy(self.get_file_format(__lowerCamelCase , __lowerCamelCase ) ) , dtype=__lowerCamelCase ) return image def lowerCAmelCase ( self : str , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[Any]="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' lowerCamelCase__ : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa lowerCamelCase__ : Optional[int] = "bf16" if fpaa else None lowerCamelCase__ : Optional[int] = FlaxUNetaDConditionModel.from_pretrained( __lowerCamelCase , subfolder="unet" , dtype=__lowerCamelCase , revision=__lowerCamelCase ) return model, params def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Tuple=0 , __lowerCamelCase : List[Any]=(4, 77, 768) , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : str = jnp.bfloataa if fpaa else jnp.floataa lowerCamelCase__ : Optional[Any] = jnp.array(load_hf_numpy(self.get_file_format(__lowerCamelCase , __lowerCamelCase ) ) , dtype=__lowerCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]], [17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]], [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]], [3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]], # fmt: on ] ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=__lowerCamelCase ) lowerCamelCase__ : int = self.get_latents(__lowerCamelCase , fpaa=__lowerCamelCase ) lowerCamelCase__ : int = self.get_encoder_hidden_states(__lowerCamelCase , fpaa=__lowerCamelCase ) lowerCamelCase__ : int = model.apply( {"params": params} , __lowerCamelCase , jnp.array(__lowerCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=__lowerCamelCase , ).sample assert sample.shape == latents.shape lowerCamelCase__ : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) lowerCamelCase__ : List[Any] = jnp.array(__lowerCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, 
[0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]], [17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]], [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]], [3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]], # fmt: on ] ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : str = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=__lowerCamelCase ) lowerCamelCase__ : Dict = self.get_latents(__lowerCamelCase , shape=(4, 4, 96, 96) , fpaa=__lowerCamelCase ) lowerCamelCase__ : str = self.get_encoder_hidden_states(__lowerCamelCase , shape=(4, 77, 1024) , fpaa=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = model.apply( {"params": params} , __lowerCamelCase , jnp.array(__lowerCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=__lowerCamelCase , ).sample assert sample.shape == latents.shape lowerCamelCase__ : str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) lowerCamelCase__ : Optional[Any] = jnp.array(__lowerCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-2 )
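Outside the test harness, the same Flax UNet call looks roughly like this; the checkpoint id, shapes, and apply signature come from the test above (using the canonical class name), while the zero inputs are placeholders:

import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

model, params = FlaxUNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet", revision="bf16", dtype=jnp.bfloat16
)
latents = jnp.zeros((4, 4, 64, 64), dtype=jnp.bfloat16)       # noisy latents
encoder_states = jnp.zeros((4, 77, 768), dtype=jnp.bfloat16)  # text-encoder hidden states
noise_pred = model.apply(
    {"params": params},
    latents,
    jnp.array(50, dtype=jnp.int32),  # timestep
    encoder_hidden_states=encoder_states,
).sample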
708
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = BarthezTokenizer A__ = BarthezTokenizerFast A__ = True A__ = True def lowerCAmelCase ( self : int ): '''simple docstring''' super().setUp() lowerCamelCase__ : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Any = "<pad>" lowerCamelCase__ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 101122 ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase__ : str = [0, 57, 3018, 70307, 91, 2] lowerCamelCase__ : Tuple = self.tokenizer( __lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCamelCase__ : Any = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Tuple = self.get_rust_tokenizer() lowerCamelCase__ : Union[str, Any] = "I was born in 92000, and this is falsé." 
lowerCamelCase__ : Dict = tokenizer.tokenize(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = self.get_rust_tokenizer() lowerCamelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCamelCase__ : List[str] = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
5
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A : Dict = { "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForPreTraining", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = [ "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = [ "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
709
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """k: Harris free parameter, empirically 0.04 or 0.06; window_size: side of the summation window."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
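For comparison, OpenCV ships a vectorized implementation of the same corner response; the double loop above is the didactic version. A short sketch (the 0.01 threshold is a common heuristic, not a fixed constant):

import cv2
import numpy as np

gray = cv2.imread("path_to_image", 0).astype(np.float32)
response = cv2.cornerHarris(gray, blockSize=3, ksize=3, k=0.04)
corners = np.argwhere(response > 0.01 * response.max())  # (y, x) coordinates of strong corners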
5
0
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix @ x = vector via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    # copy the coefficients and the right-hand side into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit a polynomial through (1, y_list[0]), (2, y_list[1]), ... and return it as a function."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect term of each optimum fitting polynomial."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
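A quick sanity check for solve is to compare it against numpy.linalg.solve on a small system (this assumes the cleaned-up names above):

import numpy as np

matrix = [[2.0, 1.0], [1.0, 3.0]]
vector = [[5.0], [10.0]]
ours = solve(matrix, vector)  # expect [[1.0], [3.0]]
reference = np.linalg.solve(np.array(matrix), np.array([row[0] for row in vector]))
assert np.allclose([row[0] for row in ours], reference)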
710
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ): '''simple docstring''' lowerCamelCase__ : Dict = parent lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Any = seq_length lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : int = use_input_mask lowerCamelCase__ : List[str] = use_token_type_ids lowerCamelCase__ : int = use_labels lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : List[Any] = embedding_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_hidden_groups lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase__ : Optional[int] = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : Optional[Any] = type_sequence_label_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : str = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Any = scope def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[int] = None if self.use_input_mask: lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : List[str] = None lowerCamelCase__ : int = None if self.use_labels: lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : str ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : str = model( __lowerCamelCase , 
attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.num_labels lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.num_choices lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : int = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : int = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Union[str, Any] = config_and_inputs lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A__ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, 
"question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A__ = True def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase ) lowerCamelCase__ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = AlbertModelTester(self ) lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ : Dict = type self.model_tester.create_and_check_model(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" ) lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(__lowerCamelCase , 
attention_mask=__lowerCamelCase )[0] lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) lowerCamelCase__ : Dict = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
5
0
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # split the loaded dataset bunch into features and labels
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # load the iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # create an XGBoost classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
711
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum through the matrix, moving right, up, or down."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # first assume a straight move right from the previous column
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # then relax downward moves within this column...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # ...and upward moves within this column
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
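The column-by-column relaxation is easier to see on an in-memory matrix; here is a hypothetical helper with the same recurrence, plus a tiny hand-checkable case:

def minimal_path_sum(matrix: list[list[int]]) -> int:
    """Same DP as solution(), but taking the matrix directly (illustrative helper)."""
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]  # cheapest path ending in column 0
    for j in range(1, cols):
        # move right from the previous column...
        best = [best[i] + matrix[i][j] for i in range(rows)]
        # ...then relax downward and upward moves within this column
        for i in range(1, rows):
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)


# best path enters the middle row and goes straight right: 1 + 2 + 3 = 6
assert minimal_path_sum([[9, 9, 9], [1, 2, 3], [9, 9, 9]]) == 6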
5
0
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowercase ( lowercase__): """simple docstring""" def __init__( self : Tuple , __lowerCamelCase : TransformeraDModel , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : KarrasDiffusionSchedulers , __lowerCamelCase : Optional[Dict[int, str]] = None , ): '''simple docstring''' super().__init__() self.register_modules(transformer=__lowerCamelCase , vae=__lowerCamelCase , scheduler=__lowerCamelCase ) # create a imagenet -> id dictionary for easier use lowerCamelCase__ : Dict = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split("," ): lowerCamelCase__ : Dict = int(__lowerCamelCase ) lowerCamelCase__ : str = dict(sorted(self.labels.items() ) ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, List[str]] ): '''simple docstring''' if not isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Optional[int] = list(__lowerCamelCase ) for l in label: if l not in self.labels: raise ValueError( f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}." ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : float = 4.0 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : int = 50 , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ): '''simple docstring''' lowerCamelCase__ : str = len(__lowerCamelCase ) lowerCamelCase__ : Dict = self.transformer.config.sample_size lowerCamelCase__ : Tuple = self.transformer.config.in_channels lowerCamelCase__ : Union[str, Any] = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__lowerCamelCase , device=self.device , dtype=self.transformer.dtype , ) lowerCamelCase__ : Union[str, Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents lowerCamelCase__ : List[Any] = torch.tensor(__lowerCamelCase , device=self.device ).reshape(-1 ) lowerCamelCase__ : str = torch.tensor([1000] * batch_size , device=self.device ) lowerCamelCase__ : List[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(__lowerCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: lowerCamelCase__ : int = latent_model_input[: len(__lowerCamelCase ) // 2] lowerCamelCase__ : Tuple = torch.cat([half, half] , dim=0 ) lowerCamelCase__ : Union[str, Any] = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[int] = t if not torch.is_tensor(__lowerCamelCase ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) lowerCamelCase__ : List[Any] = latent_model_input.device.type == "mps" if isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : List[str] = torch.floataa if is_mps else torch.floataa else: lowerCamelCase__ : Any = torch.intaa if is_mps else torch.intaa lowerCamelCase__ : Dict = torch.tensor([timesteps] , dtype=__lowerCamelCase , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: lowerCamelCase__ : Any = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowerCamelCase__ : Any = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output lowerCamelCase__ : List[str] = self.transformer( __lowerCamelCase , timestep=__lowerCamelCase , class_labels=__lowerCamelCase ).sample # perform guidance if guidance_scale > 1: lowerCamelCase__ : Optional[int] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] lowerCamelCase__ : Dict = torch.split(__lowerCamelCase , len(__lowerCamelCase ) // 2 , dim=0 ) lowerCamelCase__ : Any = uncond_eps + guidance_scale * (cond_eps - uncond_eps) lowerCamelCase__ : Optional[Any] = torch.cat([half_eps, half_eps] , dim=0 ) lowerCamelCase__ : Tuple = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: lowerCamelCase__ : Union[str, Any] = torch.split(__lowerCamelCase , __lowerCamelCase , dim=1 ) else: lowerCamelCase__ : str = noise_pred # compute previous image: x_t -> x_t-1 lowerCamelCase__ : Dict = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample if guidance_scale > 1: lowerCamelCase__ : int = latent_model_input.chunk(2 , dim=0 ) else: lowerCamelCase__ : str = latent_model_input lowerCamelCase__ : Optional[int] = 1 / self.vae.config.scaling_factor * latents lowerCamelCase__ : Optional[int] = self.vae.decode(__lowerCamelCase ).sample lowerCamelCase__ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowerCamelCase__ : Dict = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowerCamelCase__ : List[Any] = self.numpy_to_pil(__lowerCamelCase ) if not return_dict: return (samples,) return ImagePipelineOutput(images=__lowerCamelCase )
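End to end, this pipeline is typically driven as below; the checkpoint id is the public DiT release and should be treated as an assumption here:

import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

class_ids = pipe.get_label_ids(["white shark", "umbrella"])  # imagenet names -> label ids
images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images
images[0].save("white_shark.png")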
712
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _lowercase ( datasets.Metric): """simple docstring""" def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : str = compute_bleu( reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : List[str] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
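The description above mentions the brevity penalty without spelling it out: BP = 1 when the candidate length c exceeds the effective reference length r, and exp(1 - r/c) otherwise. As a small helper:

import math


def brevity_penalty(translation_length: int, reference_length: int) -> float:
    """BLEU brevity penalty: penalize candidates shorter than their references."""
    if translation_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / translation_length)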
5
0
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = None class _lowercase ( lowercase__ , lowercase__): """simple docstring""" A__ = 2 @register_to_config def __init__( self : List[str] , __lowerCamelCase : float = 0.0_2 , __lowerCamelCase : float = 100 , __lowerCamelCase : float = 1.0_0_7 , __lowerCamelCase : float = 80 , __lowerCamelCase : float = 0.0_5 , __lowerCamelCase : float = 50 , ): '''simple docstring''' lowerCamelCase__ : List[str] = sigma_max # setable values lowerCamelCase__ : int = None lowerCamelCase__ : np.IntTensor = None lowerCamelCase__ : torch.FloatTensor = None # sigma(t_i) def lowerCAmelCase ( self : Any , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : Optional[int] = None ): '''simple docstring''' return sample def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : Union[str, torch.device] = None ): '''simple docstring''' lowerCamelCase__ : Tuple = num_inference_steps lowerCamelCase__ : int = np.arange(0 , self.num_inference_steps )[::-1].copy() lowerCamelCase__ : Union[str, Any] = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase ) lowerCamelCase__ : Any = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] lowerCamelCase__ : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.floataa , device=__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : float , __lowerCamelCase : Optional[torch.Generator] = None ): '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: lowerCamelCase__ : Dict = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: lowerCamelCase__ : Tuple = 0 # sample eps ~ N(0, S_noise^2 * I) lowerCamelCase__ : Tuple = self.config.s_noise * randn_tensor(sample.shape , generator=__lowerCamelCase ).to(sample.device ) lowerCamelCase__ : str = sigma + gamma * sigma lowerCamelCase__ : List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : bool = True , ): '''simple docstring''' lowerCamelCase__ : str = sample_hat + sigma_hat * model_output lowerCamelCase__ : Optional[Any] = (sample_hat - pred_original_sample) / sigma_hat lowerCamelCase__ : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=__lowerCamelCase , derivative=__lowerCamelCase , pred_original_sample=__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : bool = True , ): '''simple docstring''' lowerCamelCase__ : Any = sample_prev + sigma_prev * model_output lowerCamelCase__ : Any = (sample_prev - pred_original_sample) / sigma_prev lowerCamelCase__ : List[Any] = sample_hat + (sigma_prev - sigma_hat) * 
(0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=__lowerCamelCase , derivative=__lowerCamelCase , pred_original_sample=__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict ): '''simple docstring''' raise NotImplementedError()
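The three methods above implement Algorithm 2 of Karras et al. (2022): the noise method churns the sample up to sigma_hat, the step method takes the first-order Euler (predictor) step, and the correction method applies the second-order Heun correction, returning a KarrasVeOutput. Below is a minimal sketch of the driving loop, mirroring the upstream diffusers KarrasVePipeline; the de-obfuscated class and method names (KarrasVeScheduler, add_noise_to_input) and the (sample + 1) / 2 model-input scaling are assumptions taken from that upstream code, not part of this file:

import torch

scheduler = KarrasVeScheduler()  # hypothetical upstream name for the class above
scheduler.set_timesteps(50)
model = ...  # a UNet2DModel-style denoiser returning an object with a .sample field
sample = torch.randn(1, 3, 64, 64) * scheduler.config.sigma_max

for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    # predictor: churn the sample to sigma_hat, then one Euler step towards sigma_prev
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
    step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
    if sigma_prev != 0:
        # corrector: re-evaluate the slope at the predicted point and average the two
        model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
        step_output = scheduler.step_correct(
            model_output,
            sigma_hat,
            sigma_prev,
            sample_hat,
            step_output.prev_sample,
            step_output.derivative,
        )
    sample = step_output.prev_sample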
713
# Crawl the first Google results for the given query and open them in the browser.
# Usage: python crawl_google_results.py <search terms>
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
5
0
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging A : Dict = logging.get_logger(__name__) A : int = { "snap-research/efficientformer-l1-300": ( "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json" ), } class _lowercase ( lowercase__): """simple docstring""" A__ = "efficientformer" def __init__( self : str , __lowerCamelCase : List[int] = [3, 2, 6, 4] , __lowerCamelCase : List[int] = [48, 96, 224, 448] , __lowerCamelCase : List[bool] = [True, True, True, True] , __lowerCamelCase : int = 448 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 4 , __lowerCamelCase : int = 7 , __lowerCamelCase : int = 5 , __lowerCamelCase : int = 8 , __lowerCamelCase : int = 4 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 16 , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 1 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : float = 1E-5 , __lowerCamelCase : str = "gelu" , __lowerCamelCase : float = 0.0_2 , __lowerCamelCase : float = 1E-1_2 , __lowerCamelCase : int = 224 , __lowerCamelCase : float = 1E-0_5 , **__lowerCamelCase : int , ): '''simple docstring''' super().__init__(**__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : List[Any] = hidden_dropout_prob lowerCamelCase__ : str = hidden_sizes lowerCamelCase__ : str = num_hidden_layers lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : Optional[int] = layer_norm_eps lowerCamelCase__ : Union[str, Any] = patch_size lowerCamelCase__ : str = num_channels lowerCamelCase__ : str = depths lowerCamelCase__ : Tuple = mlp_expansion_ratio lowerCamelCase__ : Dict = downsamples lowerCamelCase__ : Union[str, Any] = dim lowerCamelCase__ : List[Any] = key_dim lowerCamelCase__ : Optional[Any] = attention_ratio lowerCamelCase__ : int = resolution lowerCamelCase__ : int = pool_size lowerCamelCase__ : Optional[int] = downsample_patch_size lowerCamelCase__ : List[Any] = downsample_stride lowerCamelCase__ : Union[str, Any] = downsample_pad lowerCamelCase__ : List[str] = drop_path_rate lowerCamelCase__ : str = num_metaad_blocks lowerCamelCase__ : str = distillation lowerCamelCase__ : Dict = use_layer_scale lowerCamelCase__ : Optional[Any] = layer_scale_init_value lowerCamelCase__ : Optional[Any] = image_size lowerCamelCase__ : int = batch_norm_eps
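The obfuscated class above corresponds, by its attributes and default values, to the transformers EfficientFormerConfig for snap-research/efficientformer-l1-300. A minimal sketch assuming those upstream names:

from transformers import EfficientFormerConfig, EfficientFormerModel  # assumed upstream names

config = EfficientFormerConfig()      # defaults mirror the l1-300 checkpoint above
model = EfficientFormerModel(config)  # randomly initialised weights
print(config.hidden_sizes)            # [48, 96, 224, 448]
print(config.depths)                  # [3, 2, 6, 4]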
714
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Dict = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" ) lowerCamelCase__ : str = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" lowerCamelCase__ : Any = model(__lowerCamelCase )["last_hidden_state"] lowerCamelCase__ : List[str] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice. lowerCamelCase__ : str = tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
5
0
"""A1Z26 cipher: convert between letters and their positions in the alphabet."""
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based alphabet position."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Map each 1-based alphabet position back to its lowercase letter."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
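The cipher maps 'a'..'z' to 1..26 via the ASCII offset (ord('a') == 97, so ord(elem) - 96 gives the alphabet position). A quick round trip with the functions above:

assert encode("abc") == [1, 2, 3]
assert decode([8, 9]) == "hi"
assert decode(encode("marvin")) == "marvin"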
715
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _lowercase ( lowercase__): """simple docstring""" A__ = "blenderbot-small" A__ = ["past_key_values"] A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' lowerCamelCase__ : str = vocab_size lowerCamelCase__ : Union[str, Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Optional[int] = encoder_ffn_dim lowerCamelCase__ : Dict = encoder_layers lowerCamelCase__ : Any = encoder_attention_heads lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim lowerCamelCase__ : str = decoder_layers lowerCamelCase__ : Optional[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : Optional[Any] = activation_function lowerCamelCase__ : Dict = init_std lowerCamelCase__ : List[str] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : int = use_cache lowerCamelCase__ : List[Any] = encoder_layers lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ : Union[str, Any] = {0: "batch"} lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"} if self.use_past: 
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Union[str, Any] = super().outputs else: lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1] lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads lowerCamelCase__ : str = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[int] = decoder_seq_length + 3 lowerCamelCase__ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : List[Any] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase__ : str = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype lowerCamelCase__ : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Tuple = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) lowerCamelCase__ : Dict = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
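A sketch of driving the ONNX config above to produce dummy export inputs. BlenderbotSmallOnnxConfig is the assumed upstream name of the class defined here, and the checkpoint choice is illustrative:

from transformers import AutoTokenizer, BlenderbotSmallConfig, TensorType

onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="seq2seq-lm")
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(sorted(dummy))  # attention_mask, decoder_attention_mask, decoder_input_ids, input_ids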
5
0
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
716
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : int = logging.get_logger(__name__) A : Optional[int] = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "xmod" def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : List[Any] = attention_probs_dropout_prob lowerCamelCase__ : Any = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : Union[str, Any] = position_embedding_type lowerCamelCase__ : str = use_cache lowerCamelCase__ : Union[str, Any] = classifier_dropout lowerCamelCase__ : Any = pre_norm lowerCamelCase__ : Tuple = adapter_reduction_factor lowerCamelCase__ : Tuple = adapter_layer_norm lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm lowerCamelCase__ : Dict = ln_before_adapter lowerCamelCase__ : List[Any] = list(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = default_language class _lowercase ( 
lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
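The adapter-related fields above (adapter_reduction_factor, languages, default_language) identify this as the transformers XmodConfig for X-MOD. A minimal sketch, assuming the upstream class names:

from transformers import XmodConfig, XmodModel  # assumed upstream names

config = XmodConfig(languages=["en_XX", "de_DE"], default_language="en_XX")
model = XmodModel(config)
model.set_default_language("de_DE")  # route inputs through the German adapter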
5
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() A : Tuple = logging.get_logger(__name__) A : Dict = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } A : List[str] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def lowercase_ ( _A : List[str] ): """simple docstring""" lowerCamelCase__ : List[str] = {} with open(_A , "r" ) as file: for line_number, line in enumerate(_A ): lowerCamelCase__ : int = line.strip() if line: lowerCamelCase__ : Any = line.split() lowerCamelCase__ : Any = line_number lowerCamelCase__ : List[Any] = words[0] lowerCamelCase__ : int = value return result def lowercase_ ( _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : Tuple ): """simple docstring""" for attribute in key.split("." ): lowerCamelCase__ : Any = getattr(_A , _A ) lowerCamelCase__ : str = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_A ): lowerCamelCase__ : int = PARAM_MAPPING[full_name.split("." )[-1]] lowerCamelCase__ : Tuple = "param" if weight_type is not None and weight_type != "param": lowerCamelCase__ : Union[str, Any] = getattr(_A , _A ).shape elif weight_type is not None and weight_type == "param": lowerCamelCase__ : Tuple = hf_pointer for attribute in hf_param_name.split("." ): lowerCamelCase__ : Tuple = getattr(_A , _A ) lowerCamelCase__ : List[str] = shape_pointer.shape # let's reduce dimension lowerCamelCase__ : List[str] = value[0] else: lowerCamelCase__ : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": lowerCamelCase__ : Dict = value elif weight_type == "weight_g": lowerCamelCase__ : Union[str, Any] = value elif weight_type == "weight_v": lowerCamelCase__ : Union[str, Any] = value elif weight_type == "bias": lowerCamelCase__ : int = value elif weight_type == "param": for attribute in hf_param_name.split("." ): lowerCamelCase__ : Any = getattr(_A , _A ) lowerCamelCase__ : int = value else: lowerCamelCase__ : Dict = value logger.info(F"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def lowercase_ ( _A : List[Any] , _A : Any , _A : List[Any] , _A : List[Any] , _A : Tuple ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_A ): lowerCamelCase__ : List[Any] = PARAM_MAPPING[full_name.split("." )[-1]] lowerCamelCase__ : int = "param" if weight_type is not None and weight_type != "param": lowerCamelCase__ : Union[str, Any] = ".".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": lowerCamelCase__ : Union[str, Any] = ".".join([key, hf_param_name] ) else: lowerCamelCase__ : str = key lowerCamelCase__ : List[str] = value if "lm_head" in full_key else value[0] A : int = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def lowercase_ ( _A : Optional[int] , _A : Any , _A : Optional[Any]=None , _A : Tuple=None ): """simple docstring""" lowerCamelCase__ : int = False for key, mapped_key in MAPPING.items(): lowerCamelCase__ : Dict = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCamelCase__ : Tuple = True if "*" in mapped_key: lowerCamelCase__ : Any = name.split(_A )[0].split("." )[-2] lowerCamelCase__ : Optional[int] = mapped_key.replace("*" , _A ) if "weight_g" in name: lowerCamelCase__ : Union[str, Any] = "weight_g" elif "weight_v" in name: lowerCamelCase__ : List[Any] = "weight_v" elif "bias" in name: lowerCamelCase__ : List[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase__ : Optional[Any] = "weight" else: lowerCamelCase__ : Tuple = None if hf_dict is not None: rename_dict(_A , _A , _A , _A , _A ) else: set_recursively(_A , _A , _A , _A , _A ) return is_used return is_used def lowercase_ ( _A : Dict , _A : int , _A : int ): """simple docstring""" lowerCamelCase__ : List[Any] = [] lowerCamelCase__ : Union[str, Any] = fairseq_model.state_dict() lowerCamelCase__ : Optional[int] = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase__ : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( _A , _A , _A , _A , hf_model.config.feat_extract_norm == "group" , ) lowerCamelCase__ : Optional[int] = True else: lowerCamelCase__ : Optional[int] = load_wavaveca_layer(_A , _A , _A ) if not is_used: unused_weights.append(_A ) logger.warning(F"Unused weights: {unused_weights}" ) def lowercase_ ( _A : Union[str, Any] , _A : Dict , _A : Optional[Any] , _A : List[Any] , _A : Union[str, Any] ): """simple docstring""" lowerCamelCase__ : int = full_name.split("conv_layers." )[-1] lowerCamelCase__ : Optional[Any] = name.split("." ) lowerCamelCase__ : Any = int(items[0] ) lowerCamelCase__ : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) lowerCamelCase__ : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) lowerCamelCase__ : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) lowerCamelCase__ : Union[str, Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) lowerCamelCase__ : Any = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(_A ) @torch.no_grad() def lowercase_ ( _A : int , _A : Any , _A : Any=None , _A : List[Any]=None , _A : Tuple=True , _A : List[Any]=False ): """simple docstring""" if config_path is not None: lowerCamelCase__ : Optional[Any] = WavaVecaConfig.from_pretrained(_A ) else: lowerCamelCase__ : str = WavaVecaConfig() if is_seq_class: lowerCamelCase__ : str = read_txt_into_dict(_A ) lowerCamelCase__ : List[Any] = idalabel lowerCamelCase__ : List[Any] = WavaVecaForSequenceClassification(_A ) lowerCamelCase__ : Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , ) feature_extractor.save_pretrained(_A ) elif is_finetuned: if dict_path: lowerCamelCase__ : Dict = Dictionary.load(_A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCamelCase__ : List[Any] = target_dict.pad_index lowerCamelCase__ : str = target_dict.bos_index lowerCamelCase__ : str = target_dict.eos_index lowerCamelCase__ : Tuple = len(target_dict.symbols ) lowerCamelCase__ : Optional[Any] = os.path.join(_A , "vocab.json" ) if not os.path.isdir(_A ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_A ) ) return os.makedirs(_A , exist_ok=_A ) lowerCamelCase__ : List[str] = target_dict.indices # fairseq has the <pad> and <s> switched lowerCamelCase__ : str = 0 lowerCamelCase__ : Dict = 1 with open(_A , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_A , _A ) lowerCamelCase__ : Any = WavaVecaCTCTokenizer( _A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_A , ) lowerCamelCase__ : Dict = True if config.feat_extract_norm == "layer" else False lowerCamelCase__ : List[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , ) lowerCamelCase__ : Any = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A ) processor.save_pretrained(_A ) lowerCamelCase__ : int = WavaVecaForCTC(_A ) else: lowerCamelCase__ : Optional[Any] = WavaVecaForPreTraining(_A ) if is_finetuned or is_seq_class: lowerCamelCase__ : Dict = 
fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: lowerCamelCase__ : Dict = argparse.Namespace(task="audio_pretraining" ) lowerCamelCase__ : Any = fairseq.tasks.setup_task(_A ) lowerCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_A ) lowerCamelCase__ : List[str] = model[0].eval() recursively_load_weights(_A , _A , not is_finetuned ) hf_wavavec.save_pretrained(_A ) if __name__ == "__main__": A : List[Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) A : Optional[Any] = parser.parse_args() A : Any = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
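The converter's recursive weight setter walks dotted attribute paths with repeated getattr before assigning values. A self-contained illustration of that pattern, independent of fairseq or the script above:

import torch

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(2, 2)

def resolve(root, dotted_path):
    # follow "a.b.c" one attribute at a time, as the converter does
    obj = root
    for name in dotted_path.split("."):
        obj = getattr(obj, name)
    return obj

model = TinyModel()
assert resolve(model, "proj.weight") is model.proj.weight
assert resolve(model, "proj.weight").shape == torch.Size([2, 2])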
717
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Any = use_token_type_ids lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : List[Any] = self.vocab_size - 1 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Any = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, 
sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.num_labels lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any = config_and_inputs lowerCamelCase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ = ( { "feature-extraction": OpenAIGPTModel, 
"text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): '''simple docstring''' lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Tuple = inputs_dict["labels"] lowerCamelCase__ : Any = inputs_dict["labels"] lowerCamelCase__ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCamelCase ) lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is lowerCamelCase__ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 
239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
5
0
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # A root is bracketed only if the function changes sign on [a, b]
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
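With equation(x) = 10 - x^2 the positive root is sqrt(10), roughly 3.1623, and the loop halves the bracket until it is narrower than 0.01, so the returned midpoint lies within that tolerance:

import math

root = bisection(0, 6)
assert abs(root - math.sqrt(10)) < 0.01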
718
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Dict = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _lowercase ( lowercase__): """simple docstring""" A__ = "ibert" def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : Any = type_vocab_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : List[str] = quant_mode lowerCamelCase__ : int = force_dequant class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
0
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _lowercase : """simple docstring""" A__ = field( default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)}) A__ = field( default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) A__ = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) A__ = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}) A__ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"}) class _lowercase ( lowercase__): """simple docstring""" A__ = "train" A__ = "dev" class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = 42 A__ = 42 def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ): '''simple docstring''' lowerCamelCase__ : List[str] = args lowerCamelCase__ : Tuple = is_language_sensitive lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__lowerCamelCase , __lowerCamelCase ): try: lowerCamelCase__ : List[str] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowerCamelCase__ : str = mode # Load data features from cache or dataset file lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1" lowerCamelCase__ : List[str] = 
os.path.join( cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ : List[str] = cached_features_file + ".lock" with FileLock(__lowerCamelCase ): if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: lowerCamelCase__ : str = time.time() lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase__ : Optional[Any] = self.old_features["features"] lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir ) lowerCamelCase__ : Tuple = squad_convert_examples_to_features( examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , ) lowerCamelCase__ : int = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.features[i] lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase__ : List[str] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
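A hypothetical usage sketch, assuming the upstream names SquadDataTrainingArguments and SquadDataset and SQuAD-format json files under ./squad:

from transformers import AutoTokenizer
from transformers.data.datasets import SquadDataset, SquadDataTrainingArguments

args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
dataset = SquadDataset(args, tokenizer, mode="dev")  # features are cached under data_dir
print(len(dataset), sorted(dataset[0]))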
719
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Dict = logging.get_logger(__name__) A : Union[str, Any] = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "roberta" def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : List[Any] = vocab_size lowerCamelCase__ : str = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Tuple = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : int = type_vocab_size lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : Dict = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : Any = use_cache lowerCamelCase__ : int = classifier_dropout class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
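The defaults above reproduce the roberta-base architecture; a minimal sketch with the assumed upstream names:

from transformers import RobertaConfig, RobertaModel

config = RobertaConfig()      # vocab 50265, hidden 768, 12 layers, 12 heads
model = RobertaModel(config)  # randomly initialised; use from_pretrained for weights
print(config.hidden_size, config.num_hidden_layers)  # 768 12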
5
0
import math
import unittest


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
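# Sanity-check sketch (an illustrative addition, not part of the original
# module): the 6k +/- 1 test above should agree with naive trial division
# for small inputs.
def naive_is_prime(n: int) -> bool:
    return n >= 2 and all(n % d != 0 for d in range(2, n))

assert all(is_prime(n) == naive_is_prime(n) for n in range(100))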
720
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode

        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert the cached SquadFeatures for example `i` into a dict of tensors
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
5
0
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Any = use_token_type_ids lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : List[Any] = self.vocab_size - 1 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Any = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, 
sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.num_labels lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() ( lowerCamelCase__ ) : Any = config_and_inputs lowerCamelCase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if 
is_torch_available() else {} ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): '''simple docstring''' lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Tuple = inputs_dict["labels"] lowerCamelCase__ : Any = inputs_dict["labels"] lowerCamelCase__ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCamelCase ) lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is lowerCamelCase__ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. 
" \n " i\'m sure he is, " said the lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
721
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging A : Tuple = logging.get_logger(__name__) A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED A : int = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } A : Union[str, Any] = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCamelCase__ : Any = bs[:] lowerCamelCase__ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 lowerCamelCase__ : Any = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = set() lowerCamelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : Any = char return pairs class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ["input_ids", "attention_mask"] def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase ) lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()} lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding lowerCamelCase__ : List[Any] = bytes_to_unicode() lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1] lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowerCamelCase__ : List[Any] = {} lowerCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase ) lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase ) if not pairs: return token while True: lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : int = 0 while i < len(__lowerCamelCase ): try: lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : List[str] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Dict = tuple(__lowerCamelCase ) lowerCamelCase__ : str = new_word if len(__lowerCamelCase ) == 1: break else: lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase ) lowerCamelCase__ : Dict = word return word def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = [] for token in re.findall(self.pat , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in 
self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Any , __lowerCamelCase : int ): '''simple docstring''' return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' return self.decoder.get(__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase ) lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) lowerCamelCase__ : Tuple = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" 
) lowerCamelCase__ : List[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] lowerCamelCase__ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Any = [self.sep_token_id] lowerCamelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): lowerCamelCase__ : Dict = " " + text return (text, kwargs) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' lowerCamelCase__ : str = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase__ : str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase__ : Optional[int] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
5
0
def combination_util(arr, n, r, index, data, i):
    """Recursively fills data[] with combinations of size r drawn from arr[] and prints each one."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
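# Cross-check sketch (an illustrative addition): the recursive routine should
# enumerate exactly the 3-element subsets produced by itertools.combinations,
# just printed instead of returned.
from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)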
700
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image"] A__ = [ "image_embeds", "negative_image_embeds", "image", ] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.dummy_unet lowerCamelCase__ : Optional[Any] = self.dummy_movq lowerCamelCase__ : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , 
__lowerCamelCase : int=0 ): '''simple docstring''' lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = "cpu" lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : Optional[Any] = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : str = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : Any = "A red cartoon frog, 4k" lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) lowerCamelCase__ : str = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = 
torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Optional[Any] = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) lowerCamelCase__ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
5
0
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Two strings are anagrams if they contain the same letters rearranged, ignoring case and spaces."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding dict entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
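# Equivalent sketch (an illustrative addition): collections.Counter performs
# the same multiset comparison as the defaultdict loop above.
from collections import Counter

def check_anagrams_counter(first_str: str, second_str: str) -> bool:
    def normalize(s: str) -> str:
        return s.lower().strip().replace(" ", "")

    return Counter(normalize(first_str)) == Counter(normalize(second_str))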
701
def binary_xor(a: int, b: int) -> str:
    """Returns the bitwise XOR of two non-negative integers as a '0b'-prefixed binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
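# Property-check sketch (an illustrative addition): the string result should
# agree with Python's built-in ^ operator on a handful of inputs.
for a, b in [(0, 0), (5, 3), (25, 32), (127, 255)]:
    assert binary_xor(a, b) == bin(a ^ b)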
5
0
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
702
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
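# Minimal usage sketch (an illustrative addition): building the kernels needs a
# working CUDA toolchain and the kernel sources on disk, neither of which is
# guaranteed here, so guard the JIT build.
try:
    MSDA = load_cuda_kernels()  # JIT-compiles on first call, then caches
except Exception as exc:  # e.g. missing nvcc or kernel sources
    MSDA = None
    print(f"Falling back to the pure-PyTorch attention path: {exc}")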
5
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = tempfile.mkdtemp() lowerCamelCase__ : Optional[int] = BlipImageProcessor() lowerCamelCase__ : Tuple = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) lowerCamelCase__ : Optional[Any] = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" ) lowerCamelCase__ : List[str] = InstructBlipProcessor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Dict ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer def lowerCAmelCase ( self : Dict , **__lowerCamelCase : Dict ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor def lowerCAmelCase ( self : Optional[int] , **__lowerCamelCase : int ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).qformer_tokenizer def lowerCAmelCase ( self : Dict ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase__ : Optional[int] = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ : str = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowerCamelCase__ : str = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 ) lowerCamelCase__ : Union[str, Any] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) self.assertIsInstance(processor.qformer_tokenizer , __lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.get_image_processor() lowerCamelCase__ : str = self.get_tokenizer() lowerCamelCase__ : Optional[int] = self.get_qformer_tokenizer() lowerCamelCase__ : Dict = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs() lowerCamelCase__ : Union[str, Any] = image_processor(__lowerCamelCase , return_tensors="np" ) lowerCamelCase__ : 
Optional[int] = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : str = self.get_image_processor() lowerCamelCase__ : Dict = self.get_tokenizer() lowerCamelCase__ : Any = self.get_qformer_tokenizer() lowerCamelCase__ : int = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) lowerCamelCase__ : str = "lower newer" lowerCamelCase__ : Optional[int] = processor(text=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) lowerCamelCase__ : List[Any] = qformer_tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : int = self.get_image_processor() lowerCamelCase__ : Tuple = self.get_tokenizer() lowerCamelCase__ : List[str] = self.get_qformer_tokenizer() lowerCamelCase__ : Dict = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) lowerCamelCase__ : Dict = "lower newer" lowerCamelCase__ : List[str] = self.prepare_image_inputs() lowerCamelCase__ : List[str] = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual( list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.get_image_processor() lowerCamelCase__ : Optional[Any] = self.get_tokenizer() lowerCamelCase__ : Optional[Any] = self.get_qformer_tokenizer() lowerCamelCase__ : str = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ : int = processor.batch_decode(__lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Any = self.get_image_processor() lowerCamelCase__ : Dict = self.get_tokenizer() lowerCamelCase__ : Union[str, Any] = self.get_qformer_tokenizer() lowerCamelCase__ : str = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) lowerCamelCase__ : Dict = "lower newer" lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs() lowerCamelCase__ : Any = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual( list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
703
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
5
0
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    """Returns True if every element of the list is distinct."""
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
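# Usage sketch (an illustrative addition):
assert all_unique([1, 2, 3]) is True
assert all_unique([1, 2, 2]) is False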
704
from __future__ import annotations def lowercase_ ( _A : str , _A : list[str] | None = None , _A : dict[str, float] | None = None , _A : bool = False , ): """simple docstring""" lowerCamelCase__ : Tuple = cipher_alphabet or [chr(_A ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowerCamelCase__ : Dict = { "a": 0.08_497, "b": 0.01_492, "c": 0.02_202, "d": 0.04_253, "e": 0.11_162, "f": 0.02_228, "g": 0.02_015, "h": 0.06_094, "i": 0.07_546, "j": 0.00_153, "k": 0.01_292, "l": 0.04_025, "m": 0.02_406, "n": 0.06_749, "o": 0.07_507, "p": 0.01_929, "q": 0.00_095, "r": 0.07_587, "s": 0.06_327, "t": 0.09_356, "u": 0.02_758, "v": 0.00_978, "w": 0.02_560, "x": 0.00_150, "y": 0.01_994, "z": 0.00_077, } else: # Custom frequencies dictionary lowerCamelCase__ : Optional[int] = frequencies_dict if not case_sensitive: lowerCamelCase__ : str = ciphertext.lower() # Chi squared statistic values lowerCamelCase__ : dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(_A ) ): lowerCamelCase__ : Optional[Any] = "" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowerCamelCase__ : Dict = (alphabet_letters.index(letter.lower() ) - shift) % len( _A ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowerCamelCase__ : str = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowerCamelCase__ : List[str] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message lowerCamelCase__ : List[str] = decrypted_with_shift.lower().count(_A ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCamelCase__ : List[Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCamelCase__ : str = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowerCamelCase__ : Any = decrypted_with_shift.count(_A ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCamelCase__ : str = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCamelCase__ : int = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowerCamelCase__ : Optional[int] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_A : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowerCamelCase__ : int = min( _A , key=_A , ) # Get all the data from the most likely cipher (key, decoded message) ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : int = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, 
most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
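# Illustration (not part of the original file) of the chi-squared fitness test
# the decoder above relies on. The helper name `chi_squared` and its arguments
# are assumptions for this sketch: for a candidate decryption, each letter's
# observed count is compared against the count predicted by English letter
# frequencies; a lower total means more English-like text, so the best shift
# is the one that minimizes this statistic.
def chi_squared(text: str, frequencies: dict[str, float]) -> float:
    total = 0.0
    for letter, relative_frequency in frequencies.items():
        observed = text.count(letter)
        expected = relative_frequency * len(text)
        total += (observed - expected) ** 2 / expected
    return total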
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A : List[str] = { "configuration_xmod": [ "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP", "XmodConfig", "XmodOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST", "XmodForCausalLM", "XmodForMaskedLM", "XmodForMultipleChoice", "XmodForQuestionAnswering", "XmodForSequenceClassification", "XmodForTokenClassification", "XmodModel", "XmodPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def lowercase_ ( _A : int ): """simple docstring""" if not isinstance(_A , _A ): lowerCamelCase__ : List[str] = F"Input value of [number={number}] must be an integer" raise TypeError(_A ) if number < 0: return False lowerCamelCase__ : Dict = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
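# A self-contained sketch (not part of the original file) of the check above,
# under hypothetical names, since the obfuscated body references `number`
# while its parameter is `_A`: a number is automorphic when its square ends
# in the number itself, verified one trailing digit at a time.
def is_automorphic(n: int) -> bool:
    if n < 0:
        return False
    square = n * n
    while n > 0:
        if n % 10 != square % 10:
            return False
        n //= 10
        square //= 10
    return True

assert is_automorphic(76)      # 76**2 == 5776 ends in 76
assert not is_automorphic(7)   # 7**2 == 49 does not end in 7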
import functools def lowercase_ ( _A : list[int] , _A : list[int] ): """simple docstring""" if not isinstance(_A , _A ) or not all(isinstance(_A , _A ) for day in days ): raise ValueError("The parameter days should be a list of integers" ) if len(_A ) != 3 or not all(isinstance(_A , _A ) for cost in costs ): raise ValueError("The parameter costs should be a list of three integers" ) if len(_A ) == 0: return 0 if min(_A ) <= 0: raise ValueError("All days elements should be greater than 0" ) if max(_A ) >= 366: raise ValueError("All days elements should be less than 366" ) lowerCamelCase__ : str = set(_A ) @functools.cache def dynamic_programming(_A : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
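# A worked example (not part of the original file). The duplicated `_A`
# parameters above are obfuscation placeholders for (days, costs); this is a
# minimal self-contained sketch of the same recurrence under assumed names:
# for each travel day, buy the cheapest of a 1-, 7- or 30-day pass.
import functools

def min_ticket_cost(days: list[int], costs: list[int]) -> int:
    day_set = set(days)

    @functools.cache
    def best(day: int) -> int:
        if day > 365:
            return 0
        if day not in day_set:
            return best(day + 1)
        return min(
            costs[0] + best(day + 1),
            costs[1] + best(day + 7),
            costs[2] + best(day + 30),
        )

    return best(1)

# Travel days [1, 4, 6, 7, 8, 20]: a 1-day pass on day 1, a 7-day pass
# covering days 4-10, and a 1-day pass on day 20 cost 2 + 7 + 2 = 11.
assert min_ticket_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11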
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) A : Optional[int] = { "configuration_speecht5": [ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], "processing_speecht5": ["SpeechT5Processor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = ["SpeechT5Tokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = [ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import pi, sqrt def lowercase_ ( _A : float ): """simple docstring""" if num <= 0: raise ValueError("math domain error" ) if num > 171.5: raise OverflowError("math range error" ) elif num - int(_A ) not in (0, 0.5): raise NotImplementedError("num must be an integer or a half-integer" ) elif num == 0.5: return sqrt(pi ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def lowercase_ ( ): """simple docstring""" assert gamma(0.5 ) == sqrt(pi ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() A : Dict = 1.0 while num: A : List[str] = float(input("Gamma of: ")) print(F'gamma({num}) = {gamma(num)}') print("\nEnter 0 to exit...")
from __future__ import annotations import time import numpy as np A : Dict = [8, 5, 9, 7] A : Optional[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A : Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _lowercase : """simple docstring""" def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ): '''simple docstring''' lowerCamelCase__ : int = claim_vector lowerCamelCase__ : str = allocated_resources_table lowerCamelCase__ : int = maximum_claim_table def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.__need() lowerCamelCase__ : str = self.__allocated_resources_table lowerCamelCase__ : List[Any] = self.__available_resources() lowerCamelCase__ : str = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: lowerCamelCase__ : int = False for each_need in need_list: lowerCamelCase__ : Dict = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: lowerCamelCase__ : str = False break if execution: lowerCamelCase__ : Tuple = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowerCamelCase__ : Any = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
import pprint import requests A : List[str] = "https://zenquotes.io/api" def lowercase_ ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + "/today" ).json() def lowercase_ ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + "/random" ).json() if __name__ == "__main__": A : Union[str, Any] = random_quotes() pprint.pprint(response)
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = BarthezTokenizer A__ = BarthezTokenizerFast A__ = True A__ = True def lowerCAmelCase ( self : int ): '''simple docstring''' super().setUp() lowerCamelCase__ : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Any = "<pad>" lowerCamelCase__ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 101122 ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase__ : str = [0, 57, 3018, 70307, 91, 2] lowerCamelCase__ : Tuple = self.tokenizer( __lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCamelCase__ : Any = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Tuple = self.get_rust_tokenizer() lowerCamelCase__ : Union[str, Any] = "I was born in 92000, and this is falsé." 
lowerCamelCase__ : Dict = tokenizer.tokenize(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = self.get_rust_tokenizer() lowerCamelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCamelCase__ : List[str] = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( _A : Optional[int] , _A : int , _A : Optional[int]=None ): """simple docstring""" assert torch_layer.weight.shape == weight.shape, F"{torch_layer} layer.weight does not match" lowerCamelCase__ : List[Any] = nn.Parameter(_A ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F"{torch_layer} layer.bias does not match" lowerCamelCase__ : Any = nn.Parameter(_A ) def lowercase_ ( _A : List[Any] , _A : Union[str, Any] , _A : str ): """simple docstring""" lowerCamelCase__ : List[Any] = np.asarray(weights[0] ) lowerCamelCase__ : Dict = np.asarray(weights[1] ) lowerCamelCase__ : Optional[int] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(_A ).transpose(1 , 2 ).contiguous().view(-1 , _A ) , ) set_param( torch_layer.self_attention.value , torch.tensor(_A ).transpose(1 , 2 ).contiguous().view(-1 , _A ) , ) set_param( torch_layer.output.dense , torch.tensor(_A ).view(-1 , _A ).contiguous().transpose(0 , 1 ) , ) def lowercase_ ( _A : Dict , _A : Tuple , _A : Optional[int] ): """simple docstring""" lowerCamelCase__ : Tuple = np.asarray(weights[0] ) lowerCamelCase__ : List[Any] = np.asarray(weights[1] ) lowerCamelCase__ : str = np.asarray(weights[2] ) lowerCamelCase__ : Optional[int] = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(_A ).transpose(1 , 2 ).contiguous().view(-1 , _A ) , ) set_param( torch_layer.self_attention.key , torch.tensor(_A ).transpose(1 , 2 ).contiguous().view(-1 , _A ) , ) set_param( torch_layer.self_attention.value , torch.tensor(_A ).transpose(1 , 2 ).contiguous().view(-1 , _A ) , ) set_param( torch_layer.output.dense , torch.tensor(_A ).view(-1 , _A ).contiguous().transpose(0 , 1 ) , ) def lowercase_ ( _A : Tuple , _A : str , _A : Dict ): """simple docstring""" lowerCamelCase__ : List[Any] = weights[0][0][0] lowerCamelCase__ : str = np.asarray(layer_norm_a[0] ) lowerCamelCase__ : Any = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(_A ) , torch.tensor(_A ) , ) # lsh weights + output lowerCamelCase__ : Union[str, Any] = weights[0][1] if len(_A ) < 4: set_layer_weights_in_torch_lsh(_A , torch_block.attention , _A ) else: set_layer_weights_in_torch_local(_A , torch_block.attention , _A ) # intermediate weighs lowerCamelCase__ : Dict = weights[2][0][1][2] # Chunked Feed Forward if len(_A ) == 4: lowerCamelCase__ : Optional[int] = intermediate_weights[2] # layernorm 2 lowerCamelCase__ : List[str] = np.asarray(intermediate_weights[0][0] ) lowerCamelCase__ : Union[str, Any] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(_A ) , torch.tensor(_A ) , ) # intermediate dense lowerCamelCase__ : Optional[Any] = np.asarray(intermediate_weights[1][0] ) lowerCamelCase__ : Optional[Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(_A ).transpose(0 , 1 ).contiguous() , torch.tensor(_A ) , ) # intermediate out lowerCamelCase__ : str = np.asarray(intermediate_weights[4][0] ) lowerCamelCase__ : Tuple = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(_A ).transpose(0 , 1 ).contiguous() , torch.tensor(_A ) , ) def lowercase_ ( _A : List[Any] , _A : List[str] , _A : 
List[str] ): """simple docstring""" lowerCamelCase__ : Optional[int] = torch_model.reformer # word embeds lowerCamelCase__ : List[Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(_A ) , ) if isinstance(weights[3] , _A ): lowerCamelCase__ : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCamelCase__ : Optional[int] = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F"{position_embeddings[emb_idx]} emb does not match" lowerCamelCase__ : str = nn.Parameter(torch.tensor(_A ) ) lowerCamelCase__ : Optional[int] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( _A ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCamelCase__ : Optional[int] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(_A , _A , _A ) # output layer norm lowerCamelCase__ : Dict = np.asarray(weights[7][0] ) lowerCamelCase__ : Any = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(_A ) , torch.tensor(_A ) , ) # output embeddings lowerCamelCase__ : List[Any] = np.asarray(weights[9][0] ) lowerCamelCase__ : Union[str, Any] = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(_A ).transpose(0 , 1 ).contiguous() , torch.tensor(_A ) , ) def lowercase_ ( _A : Any , _A : Union[str, Any] , _A : Optional[int] ): """simple docstring""" lowerCamelCase__ : Any = ReformerConfig.from_json_file(_A ) print(F"Building PyTorch model from configuration: {config}" ) lowerCamelCase__ : List[Any] = ReformerModelWithLMHead(_A ) with open(_A , "rb" ) as f: lowerCamelCase__ : Tuple = pickle.load(_A )["weights"] set_model_weights_in_torch(_A , _A , config.hidden_size ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , _A ) if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) A : Union[str, Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
import cva import numpy as np class _lowercase : """simple docstring""" def __init__( self : Union[str, Any] , __lowerCamelCase : float , __lowerCamelCase : int ): '''simple docstring''' if k in (0.0_4, 0.0_6): lowerCamelCase__ : int = k lowerCamelCase__ : List[str] = window_size else: raise ValueError("invalid k value" ) def __str__( self : str ): '''simple docstring''' return str(self.k ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = cva.imread(__lowerCamelCase , 0 ) lowerCamelCase__ , lowerCamelCase__ : Any = img.shape lowerCamelCase__ : list[list[int]] = [] lowerCamelCase__ : List[Any] = img.copy() lowerCamelCase__ : int = cva.cvtColor(__lowerCamelCase , cva.COLOR_GRAY2RGB ) lowerCamelCase__ , lowerCamelCase__ : int = np.gradient(__lowerCamelCase ) lowerCamelCase__ : Dict = dx**2 lowerCamelCase__ : Optional[Any] = dy**2 lowerCamelCase__ : int = dx * dy lowerCamelCase__ : Union[str, Any] = 0.0_4 lowerCamelCase__ : Any = self.window_size // 2 for y in range(__lowerCamelCase , h - offset ): for x in range(__lowerCamelCase , w - offset ): lowerCamelCase__ : Optional[Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase__ : Optional[Any] = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase__ : str = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase__ : Optional[Any] = (wxx * wyy) - (wxy**2) lowerCamelCase__ : List[str] = wxx + wyy lowerCamelCase__ : List[Any] = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": A : Tuple = HarrisCorner(0.0_4, 3) A, A : Optional[int] = edge_detect.detect("path_to_image") cva.imwrite("detect.png", color_img)
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) A : Any = getLogger(__name__) def lowercase_ ( _A : str , _A : str , _A : str , _A : int = 8 , _A : int = 1024 , _A : List[str]="val" , _A : Tuple=None , _A : Union[str, Any]=False , _A : Dict="summarization" , _A : str=None , _A : List[Any]=1 , _A : Dict = None , _A : int="" , **_A : str , ): """simple docstring""" lowerCamelCase__ : Tuple = str(_A ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=_A ) lowerCamelCase__ : Optional[Any] = Path(_A ) lowerCamelCase__ : Tuple = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(_A ) lowerCamelCase__ : int = AutoModelForSeqaSeqLM.from_pretrained(_A ).cuda() if fpaa: lowerCamelCase__ : Dict = model.half() # determine if we need to increase num_beams use_task_specific_params(_A , _A ) # update config with task specific params lowerCamelCase__ : Tuple = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: lowerCamelCase__ : List[Any] = num_return_sequences lowerCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(_A ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: lowerCamelCase__ : List[str] = tokenizer.model_max_length if prefix is None: lowerCamelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or "" lowerCamelCase__ : Dict = SeqaSeqDataset( _A , _A , _A , max_target_length=1024 , type_path=_A , n_obs=_A , prefix=_A , **_A , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. lowerCamelCase__ : Optional[Any] = ds.make_sortish_sampler(_A , distributed=_A , add_extra_examples=_A , shuffle=_A ) lowerCamelCase__ : str = DataLoader(_A , sampler=_A , batch_size=_A , collate_fn=ds.collate_fn ) lowerCamelCase__ : List[str] = [] for batch in tqdm(_A ): lowerCamelCase__ : Optional[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=_A , num_beams=_A , **_A , ) lowerCamelCase__ : Any = tokenizer.batch_decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) lowerCamelCase__ : str = batch["ids"] if num_return_sequences > 1: lowerCamelCase__ : str = chunks(_A , _A ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(_A ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(_A , _A ) return results, sampler.num_replicas def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : int = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=_A , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=_A , help="like facebook/bart-large-cnn,t5-base, etc." 
, default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=_A , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=_A , default=_A ) parser.add_argument( "--type_path" , type=_A , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=_A , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=_A , default=8 , required=_A , help="batch size" ) parser.add_argument( "--local_rank" , type=_A , default=-1 , required=_A , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=_A , default=_A , required=_A , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=_A , default=1 , required=_A , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=_A , default=600 , required=_A , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=_A , default=_A , required=_A ) parser.add_argument("--tgt_lang" , type=_A , default=_A , required=_A ) parser.add_argument( "--prefix" , type=_A , required=_A , default=_A , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) lowerCamelCase__ : List[Any] = time.time() lowerCamelCase__ : List[str] = parser.parse_known_args() lowerCamelCase__ : Optional[int] = parse_numeric_n_bool_cl_kwargs(_A ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) lowerCamelCase__ : int = Path(args.save_dir + "_tmp" ) Path(_A ).mkdir(exist_ok=_A ) # this handles locking. lowerCamelCase__ : Any = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
lowerCamelCase__ : Dict = {} if args.src_lang is not None: lowerCamelCase__ : Dict = args.src_lang if args.tgt_lang is not None: lowerCamelCase__ : Any = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=_A ) lowerCamelCase__ : Dict = eval_data_dir( args.data_dir , _A , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_A , **_A , ) if args.local_rank <= 0: lowerCamelCase__ : Tuple = Path(args.save_dir ) save_dir.mkdir(exist_ok=_A ) lowerCamelCase__ : Optional[int] = gather_results_from_each_node(_A , _A , args.sync_timeout ) lowerCamelCase__ : int = combine_partial_results(_A ) if args.num_return_sequences > 1: lowerCamelCase__ : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(_A , _A ) return lowerCamelCase__ : Union[str, Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(_A ) as f: lowerCamelCase__ : Union[str, Any] = [x.rstrip() for x in f.readlines()][: len(_A )] # Calculate metrics, save metrics, and save _generations.txt lowerCamelCase__ : Optional[Any] = "translation" in args.task lowerCamelCase__ : int = calculate_bleu if calc_bleu else calculate_rouge lowerCamelCase__ : List[str] = "bleu" if calc_bleu else "rouge" lowerCamelCase__ : Dict = score_fn(_A , _A ) lowerCamelCase__ : int = len(_A ) lowerCamelCase__ : Optional[int] = time.time() - start_time lowerCamelCase__ : str = round(runtime / metrics["n_obs"] , 4 ) lowerCamelCase__ : Any = num_replicas # TODO(@stas00): add whatever metadata to metrics lowerCamelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(_A , _A , indent=_A ) print(_A ) write_txt_file(_A , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(_A , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(_A ) def lowercase_ ( _A : List[str] ): """simple docstring""" lowerCamelCase__ : int = [] for partial_result in partial_results: records.extend(_A ) lowerCamelCase__ : Optional[int] = sorted(_A , key=lambda _A : x["id"] ) lowerCamelCase__ : Dict = [x["pred"] for x in records] return preds def lowercase_ ( _A : str , _A : str , _A : Dict ): """simple docstring""" lowerCamelCase__ : str = time.time() logger.info("waiting for all nodes to finish" ) lowerCamelCase__ : Dict = None while (time.time() - start_wait) < timeout: lowerCamelCase__ : Dict = list(save_dir.glob("rank_*.json" ) ) if len(_A ) < num_replicas: continue try: # make sure all json files are fully saved lowerCamelCase__ : Tuple = lmap(_A , _A ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ): '''simple docstring''' lowerCamelCase__ : Dict = parent lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Any = seq_length lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : int = use_input_mask lowerCamelCase__ : List[str] = use_token_type_ids lowerCamelCase__ : int = use_labels lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : List[Any] = embedding_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_hidden_groups lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase__ : Optional[int] = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : Optional[Any] = type_sequence_label_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : str = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Any = scope def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[int] = None if self.use_input_mask: lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : List[str] = None lowerCamelCase__ : int = None if self.use_labels: lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : str ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : str = model( __lowerCamelCase , 
attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.num_labels lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.num_choices lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : int = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : int = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Union[str, Any] = config_and_inputs lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A__ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, 
"question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A__ = True def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase ) lowerCamelCase__ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = AlbertModelTester(self ) lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ : Dict = type self.model_tester.create_and_check_model(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" ) lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(__lowerCamelCase , 
attention_mask=__lowerCamelCase )[0] lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) lowerCamelCase__ : Dict = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
import os def lowercase_ ( _A : str = "input.txt" ): """simple docstring""" with open(os.path.join(os.path.dirname(_A ) , _A ) ) as input_file: lowerCamelCase__ : List[Any] = [ [int(_A ) for element in line.split("," )] for line in input_file.readlines() ] lowerCamelCase__ : Optional[Any] = len(_A ) lowerCamelCase__ : Union[str, Any] = len(matrix[0] ) lowerCamelCase__ : Union[str, Any] = [[-1 for _ in range(_A )] for _ in range(_A )] for i in range(_A ): lowerCamelCase__ : Optional[Any] = matrix[i][0] for j in range(1 , _A ): for i in range(_A ): lowerCamelCase__ : int = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , _A ): lowerCamelCase__ : Tuple = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): lowerCamelCase__ : str = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(f'{solution() = }')
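# A self-contained illustration (not part of the original file) of the
# three-pass column DP above (Project Euler 82: move up, down and right from
# the left column to the right column). The helper name is hypothetical:
def minimal_path_sum(matrix: list[list[int]]) -> int:
    sums = [row[0] for row in matrix]          # best cost to reach column 0
    for j in range(1, len(matrix[0])):
        sums = [sums[i] + matrix[i][j] for i in range(len(matrix))]  # step right
        for i in range(1, len(matrix)):        # relax paths arriving from above
            sums[i] = min(sums[i], sums[i - 1] + matrix[i][j])
        for i in range(len(matrix) - 2, -1, -1):  # relax paths arriving from below
            sums[i] = min(sums[i], sums[i + 1] + matrix[i][j])
    return min(sums)

# Cheapest left-to-right path through [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# is 1 -> 2 -> 3, so the answer is 6.
assert minimal_path_sum([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) == 6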
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _lowercase ( datasets.Metric): """simple docstring""" def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : str = compute_bleu( reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : List[str] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class _lowercase ( lowercase__ , lowercase__): """simple docstring""" @register_to_config def __init__( self : List[Any] , __lowerCamelCase : int = 768 , ): '''simple docstring''' super().__init__() lowerCamelCase__ : List[Any] = nn.Parameter(torch.zeros(1 , __lowerCamelCase ) ) lowerCamelCase__ : int = nn.Parameter(torch.ones(1 , __lowerCamelCase ) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Union[str, torch.device]] = None , __lowerCamelCase : Optional[torch.dtype] = None , ): '''simple docstring''' lowerCamelCase__ : Dict = nn.Parameter(self.mean.to(__lowerCamelCase ).to(__lowerCamelCase ) ) lowerCamelCase__ : Union[str, Any] = nn.Parameter(self.std.to(__lowerCamelCase ).to(__lowerCamelCase ) ) return self def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : str = (embeds - self.mean) * 1.0 / self.std return embeds def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = (embeds * self.std) + self.mean return embeds
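# A quick round-trip sketch (not part of the original file): the two forward
# helpers in the class above standardize embeddings with a learned
# per-dimension mean/std and invert that transform, so unscaling a scaled
# tensor recovers the original. Plain tensors stand in for the parameters:
import torch

mean = torch.zeros(1, 4)
std = 2.0 * torch.ones(1, 4)
embeds = torch.randn(3, 4)
scaled = (embeds - mean) * 1.0 / std   # mirrors the first helper
restored = (scaled * std) + mean       # mirrors the second helper
assert torch.allclose(restored, embeds)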
713
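To make the `scale`/`unscale` pair above concrete: it is plain standardization with learned per-dimension statistics. A self-contained round-trip sketch (variable names are mine, not part of the module):

# Sketch of the standardize / de-standardize round-trip implemented above.
# `mean` and `std` stand in for the module's learned parameters.
import torch

embedding_dim = 768
mean = torch.zeros(1, embedding_dim)
std = torch.ones(1, embedding_dim)

embeds = torch.randn(4, embedding_dim)
scaled = (embeds - mean) / std      # what `scale` computes
restored = scaled * std + mean      # what `unscale` computes
assert torch.allclose(embeds, restored, atol=1e-6)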
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'https://google.com{link.get("href")}')
5
0
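A hedged note on the scraping script above: a more defensive request step would set a timeout and raise on HTTP errors instead of silently parsing an error page. Sketch:

# More defensive variant of the request step above (sketch).
import requests
from fake_useragent import UserAgent

def fetch(url: str) -> str:
    res = requests.get(url, headers={"User-Agent": UserAgent().random}, timeout=10)
    res.raise_for_status()  # fail loudly on 4xx/5xx responses
    return res.text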
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
714
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
5
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = DiTPipeline A__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS A__ = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } A__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS A__ = False def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Dict = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__lowerCamelCase , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=__lowerCamelCase , ) lowerCamelCase__ : int = AutoencoderKL() lowerCamelCase__ : int = DDIMScheduler() lowerCamelCase__ : str = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any]=0 ): '''simple docstring''' if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : Dict = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : Any = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Tuple = "cpu" lowerCamelCase__ : int = self.get_dummy_components() lowerCamelCase__ : Any = self.pipeline_class(**__lowerCamelCase ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase ) lowerCamelCase__ : Dict = pipe(**__lowerCamelCase ).images lowerCamelCase__ : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowerCamelCase__ : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] ) lowerCamelCase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__lowerCamelCase , 1E-3 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=__lowerCamelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : str ): '''simple 
docstring''' lowerCamelCase__ : Dict = torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" ) pipe.to("cuda" ) lowerCamelCase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"] lowerCamelCase__ : List[str] = pipe.get_label_ids(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = pipe(__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=40 , output_type="np" ).images for word, image in zip(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Any = load_numpy( f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" ) assert np.abs((expected_image - image).max() ) < 1E-2 def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : str = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" ) lowerCamelCase__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("cuda" ) lowerCamelCase__ : Any = ["vase", "umbrella"] lowerCamelCase__ : int = pipe.get_label_ids(__lowerCamelCase ) lowerCamelCase__ : int = torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] = pipe(__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=25 , output_type="np" ).images for word, image in zip(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f"/dit/{word}_512.npy" ) assert np.abs((expected_image - image).max() ) < 1E-1
715
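The second integration test above swaps the pipeline's scheduler. The general pattern, shown here as a sketch (downloading the real checkpoint is heavyweight), is to rebuild the new scheduler from the old scheduler's config and assign it back:

# Scheduler-swap pattern exercised by the test above (sketch).
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)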
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _lowercase ( lowercase__): """simple docstring""" A__ = "blenderbot-small" A__ = ["past_key_values"] A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' lowerCamelCase__ : str = vocab_size lowerCamelCase__ : Union[str, Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Optional[int] = encoder_ffn_dim lowerCamelCase__ : Dict = encoder_layers lowerCamelCase__ : Any = encoder_attention_heads lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim lowerCamelCase__ : str = decoder_layers lowerCamelCase__ : Optional[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : Optional[Any] = activation_function lowerCamelCase__ : Dict = init_std lowerCamelCase__ : List[str] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : int = use_cache lowerCamelCase__ : List[Any] = encoder_layers lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ : Union[str, Any] = {0: "batch"} lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"} if self.use_past: 
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Union[str, Any] = super().outputs else: lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1] lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads lowerCamelCase__ : str = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[int] = decoder_seq_length + 3 lowerCamelCase__ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : List[Any] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase__ : str = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype lowerCamelCase__ : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Tuple = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) lowerCamelCase__ : Dict = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
5
0
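The ONNX config above builds dummy `past_key_values` as one `(key, value)` pair per layer, each of shape `(batch, num_heads, past_seq_len, hidden_size // num_heads)`, and widens the attention mask to cover the past positions. A sketch of that layout using the config's default sizes:

# Sketch of the past_key_values layout constructed above.
import torch

batch, num_heads, past_seq_len, hidden_size, num_layers = 2, 16, 7, 512, 8
head_dim = hidden_size // num_heads

past_key_values = [
    (
        torch.zeros(batch, num_heads, past_seq_len, head_dim),  # key
        torch.zeros(batch, num_heads, past_seq_len, head_dim),  # value
    )
    for _ in range(num_layers)
]
# the attention mask is extended so it also covers the cached past positions
attention_mask = torch.ones(batch, past_seq_len + 1)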
def z_function(input_str: str) -> list[int]:
    """
    For each index i, compute the length of the longest common prefix
    of `input_str` and `input_str[i:]`.

    >>> z_function("abracadabra")
    [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match at index i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """
    Count occurrences of `pattern` in `input_str` using the Z-function.

    >>> find_pattern("abr", "abracadabra")
    2
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
716
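A worked example for the Z-function file above (using the repaired names): `z_function(s)[i]` is the length of the longest common prefix of `s` and `s[i:]`, and `find_pattern` counts matches by scanning the Z-array of `pattern + input_str`.

# Worked example for the functions above.
s = "abacaba"
print(z_function(s))            # [0, 0, 1, 0, 3, 0, 1]
print(find_pattern("aba", s))   # 2 (matches start at indices 0 and 4)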
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : int = logging.get_logger(__name__) A : Optional[int] = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "xmod" def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : List[Any] = attention_probs_dropout_prob lowerCamelCase__ : Any = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : Union[str, Any] = position_embedding_type lowerCamelCase__ : str = use_cache lowerCamelCase__ : Union[str, Any] = classifier_dropout lowerCamelCase__ : Any = pre_norm lowerCamelCase__ : Tuple = adapter_reduction_factor lowerCamelCase__ : Tuple = adapter_layer_norm lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm lowerCamelCase__ : Dict = ln_before_adapter lowerCamelCase__ : List[Any] = list(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = default_language class _lowercase ( 
lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
0
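One non-obvious knob in the config above is `adapter_reduction_factor`: the per-language adapter's bottleneck width is the hidden size divided by this factor. With the defaults shown here (an assumption about the adapter geometry, not spelled out in the config itself):

# Relation between hidden size and adapter bottleneck width (sketch).
hidden_size = 768
adapter_reduction_factor = 2
bottleneck_size = hidden_size // adapter_reduction_factor  # 384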
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging A : Tuple = logging.get_logger(__name__) if is_vision_available(): import PIL class _lowercase ( lowercase__): """simple docstring""" A__ = ["pixel_values"] def __init__( self : str , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : bool = True , **__lowerCamelCase : List[Any] , ): '''simple docstring''' super().__init__(**__lowerCamelCase ) lowerCamelCase__ : int = size if size is not None else {"shortest_edge": 224} lowerCamelCase__ : Optional[Any] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) lowerCamelCase__ : Tuple = crop_size if crop_size is not None else {"height": 224, "width": 224} lowerCamelCase__ : Union[str, Any] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase , param_name="crop_size" ) lowerCamelCase__ : List[str] = do_resize lowerCamelCase__ : int = size lowerCamelCase__ : Optional[int] = resample lowerCamelCase__ : List[str] = do_center_crop lowerCamelCase__ : Dict = crop_size lowerCamelCase__ : List[Any] = do_rescale lowerCamelCase__ : Optional[int] = rescale_factor lowerCamelCase__ : Optional[Any] = do_normalize lowerCamelCase__ : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCamelCase__ : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD lowerCamelCase__ : Optional[Any] = do_convert_rgb def lowerCAmelCase ( self : Tuple , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : str , ): '''simple docstring''' lowerCamelCase__ : int = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) lowerCamelCase__ : Any = get_resize_output_image_size(__lowerCamelCase , size=size["shortest_edge"] , default_to_square=__lowerCamelCase ) return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[int] , ): '''simple docstring''' lowerCamelCase__ : Any = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}" ) return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : List[str] , ): '''simple docstring''' return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Dict , ): '''simple docstring''' return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : int = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' lowerCamelCase__ : int = do_resize if do_resize is not None else self.do_resize lowerCamelCase__ : Dict = size if size is not None else self.size lowerCamelCase__ : str = get_size_dict(__lowerCamelCase , param_name="size" , default_to_square=__lowerCamelCase ) lowerCamelCase__ : Dict = resample if resample is not None else self.resample lowerCamelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase__ : int = crop_size if crop_size is not None else self.crop_size lowerCamelCase__ : List[str] = get_size_dict(__lowerCamelCase , param_name="crop_size" , default_to_square=__lowerCamelCase ) lowerCamelCase__ : Dict = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean lowerCamelCase__ : Optional[int] = image_std if image_std is not None else self.image_std lowerCamelCase__ : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCamelCase__ : Optional[int] = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCamelCase__ : Union[str, Any] = [convert_to_rgb(__lowerCamelCase ) for image in images] # All transformations expect numpy arrays. lowerCamelCase__ : Tuple = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: lowerCamelCase__ : int = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images] if do_center_crop: lowerCamelCase__ : Any = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase ) for image in images] if do_rescale: lowerCamelCase__ : List[str] = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images] if do_normalize: lowerCamelCase__ : Any = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images] lowerCamelCase__ : Optional[Any] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images] lowerCamelCase__ : Dict = {"pixel_values": images} return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
717
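The `preprocess` method above applies resize (shortest edge), center crop, rescale, and normalize in sequence. A numpy sketch of the rescale/normalize tail of that chain, using the OpenAI CLIP statistics the class defaults to (resize/crop are omitted since they only change spatial dimensions):

# Sketch of the rescale + normalize steps implemented above.
import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

image = np.random.randint(0, 256, (256, 320, 3), dtype=np.uint8)  # H, W, C

pixels = image.astype(np.float32) * (1 / 255)                 # rescale
pixels = (pixels - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD        # normalize
pixels = pixels.transpose(2, 0, 1)                            # ChannelDimension.FIRST
print(pixels.shape)  # (3, 256, 320)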
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Any = use_token_type_ids lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : List[Any] = self.vocab_size - 1 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Any = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, 
sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.num_labels lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any = config_and_inputs lowerCamelCase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ = ( { "feature-extraction": OpenAIGPTModel, 
"text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): '''simple docstring''' lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Tuple = inputs_dict["labels"] lowerCamelCase__ : Any = inputs_dict["labels"] lowerCamelCase__ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCamelCase ) lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is lowerCamelCase__ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 
239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
5
0
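The integration test above greedy-decodes a continuation of a three-token prompt. The minimal pattern it exercises, as a sketch:

# Minimal greedy-generation pattern from the test above.
import torch
from transformers import OpenAIGPTLMHeadModel

model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = torch.tensor([[481, 4735, 544]])              # "the president is"
output_ids = model.generate(input_ids, do_sample=False)   # greedy decoding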
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    """
    Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor.
    """

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
718
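A usage sketch for the processor above (the checkpoint name is illustrative): audio is routed to the feature extractor, text to the tokenizer, and the tokenizer's `input_ids` are merged in as `labels`.

# Usage sketch for the processor above.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
audio = np.random.randn(16000)  # 1 second of fake 16 kHz audio

inputs = processor(audio=audio, sampling_rate=16000, text="hello world", return_tensors="pt")
print(inputs.keys())  # features/mask from the extractor, labels from the tokenizer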
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Dict = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _lowercase ( lowercase__): """simple docstring""" A__ = "ibert" def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : Any = type_vocab_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : List[str] = quant_mode lowerCamelCase__ : int = force_dequant class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
0
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def lowercase_ ( _A : Optional[Any] ): """simple docstring""" if isinstance(_A , collections.abc.Iterable ): return x return (x, x) @require_tf class _lowercase : """simple docstring""" def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' pass def lowerCAmelCase ( self : str ): '''simple docstring''' pass def lowerCAmelCase ( self : int ): '''simple docstring''' pass def lowerCAmelCase ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=None , **__lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = TFVisionTextDualEncoderModel(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None , **__lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=__lowerCamelCase , text_model=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str]=None , **__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : int = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model} lowerCamelCase__ : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCamelCase ) lowerCamelCase__ : Any = 
model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=None , **__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=__lowerCamelCase , text_model=__lowerCamelCase ) lowerCamelCase__ : Tuple = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase ) lowerCamelCase__ : List[Any] = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCamelCase ) lowerCamelCase__ : List[str] = TFVisionTextDualEncoderModel.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : List[str] = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase ) lowerCamelCase__ : List[str] = after_output[0].numpy() lowerCamelCase__ : Tuple = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowerCamelCase , 1E-5 ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : int = TFVisionTextDualEncoderModel(vision_model=__lowerCamelCase , text_model=__lowerCamelCase ) lowerCamelCase__ : List[str] = model( input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , output_attentions=__lowerCamelCase ) lowerCamelCase__ : Dict = output.vision_model_output.attentions self.assertEqual(len(__lowerCamelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ : Optional[int] = to_atuple(vision_model.config.image_size ) lowerCamelCase__ : Dict = to_atuple(vision_model.config.patch_size ) lowerCamelCase__ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCamelCase__ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCamelCase__ : List[Any] = output.text_model_output.attentions self.assertEqual(len(__lowerCamelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase ( self : str , __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : float ): '''simple docstring''' lowerCamelCase__ : str = np.abs((a - b) ).max() self.assertLessEqual(__lowerCamelCase , __lowerCamelCase , f"Difference between torch and flax is {diff} (>= {tol})." 
)

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            # Compare outputs before and after the save/load round trip (the original
            # compared a tensor against itself, which is always zero).
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
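# Usage sketch (values are illustrative): build a small config and inspect the
# dynamic ONNX axes exposed by the config class above.
# config = RobertaConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
# onnx_config = RobertaOnnxConfig(config)
# onnx_config.inputs  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...)])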
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # The original degraded copy instantiated the same processor in both branches;
        # v2 data (with unanswerable questions) needs SquadV2Processor.
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
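# Minimal usage sketch (the paths and checkpoint below are hypothetical; `data_dir`
# must contain SQuAD-format train/dev json files):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad", max_seq_length=384)
# train_set = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)
# print(len(train_set), train_set[0]["input_ids"].shape)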
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Sort a list in place with the recursive bubble-sort algorithm."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # Swap out-of-order neighbours (the original assigned the tuple to a
            # single variable, so no swap ever happened).
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
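# Example calls (sketch): each pass bubbles the largest remaining element to the
# end, and the recursion stops as soon as a full pass makes no swap.
#   bubble_sort([0, 5, 2, 3, 2])  ->  [0, 2, 2, 3, 5]
#   bubble_sort([-2, -45, -5])    ->  [-45, -5, -2]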
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
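# Sketch of the LED-specific `_pad` override above (checkpoint name as in the map):
# a user-supplied `global_attention_mask` is padded with -1 ("local attention") so
# it stays aligned with the padded `input_ids`.
# tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
# enc = tok("some long document")
# enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
# padded = tok.pad(enc, padding="max_length", max_length=32)
# padded["global_attention_mask"][-1]  # -> -1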
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
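# The single-replica pattern the first test exercises, in isolation (sketch;
# `model`, `optimizer`, and `per_microbatch_gradients` are placeholders):
# accumulator = GradientAccumulator()
# for grads in per_microbatch_gradients:   # one gradient list per micro-batch
#     accumulator(grads)                   # sums gradients and increments .step
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()                      # zero the buffers for the next large batch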
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
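# The two-stage flow above, in brief (model ids as in the test; fp16 needs a GPU):
# the prior maps text to image embeddings, then the decoder denoises starting from
# the init image using those embeddings; `strength` controls how much of the init
# image is preserved.
# image_emb, negative_emb = pipe_prior("A red cartoon frog, 4k").to_tuple()
# result = pipeline(image=init_image, image_embeds=image_emb,
#                   negative_image_embeds=negative_emb, strength=0.2, output_type="np")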
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Doctest placeholder; exercised via `doctest.testmod()` below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
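# Example sketch: a triangle graph where the direct 1-3 edge (weight 4) is skipped
# by both Prim variants in favour of the cheaper 1-2-3 path.
# graph = [Vertex(i) for i in range(3)]
# connect(graph, 1, 2, 1)
# connect(graph, 2, 3, 2)
# connect(graph, 1, 3, 4)
# prim(graph, graph[0])             # -> [(2, 1), (3, 2)]
# list(prim_heap(graph, graph[0]))  # -> [(2, 1), (3, 2)]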
def binary_xor(a: int, b: int) -> str:
    """Return the binary XOR of two non-negative integers as a "0b"-prefixed string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
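# Example values (sketch): inputs are zero-padded to equal width before the
# per-character comparison.
#   binary_xor(25, 32)  ->  '0b111001'
#   binary_xor(37, 50)  ->  '0b010111'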
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
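# Quick check of the property above: with the default strides the feature encoder
# downsamples the raw waveform by 5*2*2*2*2*2*2 = 320, i.e. one frame per 320
# samples (20 ms at 16 kHz).
# config = WavLMConfig()
# assert config.inputs_to_logits_ratio == 320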
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
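# Usage sketch (assumes a working CUDA toolchain; the first call JIT-compiles the
# extension, which can take a while):
# MSDA = load_cuda_kernels()
# The returned module exposes the multi-scale deformable attention forward/backward
# ops used by Deformable DETR; the exact op names live in the .cu/.cpp sources above.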
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL A : List[Any] = logging.get_logger(__name__) def lowercase_ ( _A : np.ndarray , _A : Union[int, Iterable[int]] , _A : bool , _A : int ): """simple docstring""" def constraint_to_multiple_of(_A : Optional[Any] , _A : Optional[int] , _A : Any=0 , _A : int=None ): lowerCamelCase__ : Tuple = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowerCamelCase__ : str = math.floor(val / multiple ) * multiple if x < min_val: lowerCamelCase__ : Any = math.ceil(val / multiple ) * multiple return x lowerCamelCase__ : int = (output_size, output_size) if isinstance(_A , _A ) else output_size lowerCamelCase__ : int = get_image_size(_A ) lowerCamelCase__ : List[str] = output_size # determine new height and width lowerCamelCase__ : int = output_height / input_height lowerCamelCase__ : int = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowerCamelCase__ : Tuple = scale_width else: # fit height lowerCamelCase__ : Optional[int] = scale_height lowerCamelCase__ : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_A ) lowerCamelCase__ : Tuple = constraint_to_multiple_of(scale_width * input_width , multiple=_A ) return (new_height, new_width) class _lowercase ( lowercase__): """simple docstring""" A__ = ["pixel_values"] def __init__( self : Dict , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = False , __lowerCamelCase : int = 1 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , **__lowerCamelCase : List[str] , ): '''simple docstring''' super().__init__(**__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = size if size is not None else {"height": 384, "width": 384} lowerCamelCase__ : Any = get_size_dict(__lowerCamelCase ) lowerCamelCase__ : Any = do_resize lowerCamelCase__ : int = size lowerCamelCase__ : str = keep_aspect_ratio lowerCamelCase__ : Tuple = ensure_multiple_of lowerCamelCase__ : Union[str, Any] = resample lowerCamelCase__ : int = do_rescale lowerCamelCase__ : int = rescale_factor lowerCamelCase__ : List[Any] = do_normalize lowerCamelCase__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase ( self : Any , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : bool = False , __lowerCamelCase : int = 1 , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Dict , ): 
'''simple docstring''' lowerCamelCase__ : str = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" ) lowerCamelCase__ : List[Any] = get_resize_output_image_size( __lowerCamelCase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=__lowerCamelCase , multiple=__lowerCamelCase , ) return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Dict , ): '''simple docstring''' return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : int , ): '''simple docstring''' return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : int = None , __lowerCamelCase : bool = None , __lowerCamelCase : int = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : List[Any] , ): '''simple docstring''' lowerCamelCase__ : str = do_resize if do_resize is not None else self.do_resize lowerCamelCase__ : Tuple = size if size is not None else self.size lowerCamelCase__ : Dict = get_size_dict(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowerCamelCase__ : Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowerCamelCase__ : int = resample if resample is not None else self.resample lowerCamelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean lowerCamelCase__ : Dict = image_std if image_std is not None else self.image_std lowerCamelCase__ : List[Any] = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." 
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess an image or batch of images for the model."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into per-image semantic segmentation maps,
        optionally resized to `target_sizes`."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps.
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
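
if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the module): exercises the
    # processor on a synthetic Pillow image. Every value below is an
    # assumption chosen only for the demo, and the relative imports above
    # mean this only runs inside the package (e.g. via `python -m ...`).
    from PIL import Image

    processor = DPTImageProcessor(keep_aspect_ratio=True, ensure_multiple_of=32)
    demo_image = Image.new("RGB", (640, 480))  # (width, height) stand-in for a real photo
    batch = processor.preprocess(demo_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 384, 512)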
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
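
# --- Illustrative sanity check (not part of the original script) ---
# A minimal sketch of the date arithmetic above, so the "23 days since last
# update, 30 days since creation" staleness rule can be checked without the
# GitHub API. The helper name `_is_stale_candidate` is hypothetical.
def _is_stale_candidate(updated_at: dt, created_at: dt, now: dt) -> bool:
    # Mirrors the thresholds used by the final `elif` branch in `main`.
    return (now - updated_at).days > 23 and (now - created_at).days >= 30


# e.g. _is_stale_candidate(dt(2023, 1, 1), dt(2022, 11, 1), dt(2023, 2, 1)) -> True
#      _is_stale_candidate(dt(2023, 1, 31), dt(2022, 11, 1), dt(2023, 2, 1)) -> False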
import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
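
# --- Running the slow checks above (illustrative) ---
# The @slow tests are skipped by default; in the transformers test suite they
# are typically enabled via an environment variable, e.g.:
#
#   RUN_SLOW=1 pytest tests/models/albert/test_modeling_flax_albert.py
#
# The exact test-file path is an assumption about the repository layout.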
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Return the most likely (shift, chi-squared value, decoded message) for
    a Caesar-enciphered `ciphertext`, using letter-frequency analysis."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # Cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # Decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (chi_squared_statistic, decrypted_with_shift)

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key)

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
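
if __name__ == "__main__":
    # Usage sketch (illustrative): "dro aesmu lbygx pyh" is "the quick brown
    # fox" Caesar-shifted by 10, which the frequency statistic should recover
    # for English-looking text of this length.
    shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared("dro aesmu lbygx pyh")
    print(f"shift={shift}, chi_squared={chi_squared:.3f}, plaintext={plaintext!r}")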