Dataset schema (per-row fields):

  field                    type     values
  code                     string   length 87 to 55.2k
  code_codestyle           int64    0 to 349
  style_context            string   length 135 to 49.1k
  style_context_codestyle  int64    0 to 349
  label                    int64    0 to 1
"""simple docstring""" from collections.abc import Iterable from typing import Any class __A : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : int | None = None ) ->Optional[Any]: """simple docstring""" snake_case_ = value snake_case_ = None # Added in order to delete a node easier snake_case_ = None snake_case_ = None def __repr__( self : str ) ->str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 ) class __A : '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : Node | None = None ) ->str: """simple docstring""" snake_case_ = root def __str__( self : Dict ) ->str: """simple docstring""" return str(self.root ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Node , UpperCAmelCase_ : Node | None ) ->None: """simple docstring""" if new_children is not None: # reset its kids snake_case_ = node.parent if node.parent is not None: # reset its parent if self.is_right(UpperCAmelCase_ ): # If it is the right children snake_case_ = new_children else: snake_case_ = new_children else: snake_case_ = new_children def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Node ) ->bool: """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def lowerCAmelCase ( self : Any ) ->bool: """simple docstring""" return self.root is None def lowerCAmelCase ( self : Any , UpperCAmelCase_ : int ) ->None: """simple docstring""" snake_case_ = Node(UpperCAmelCase_ ) # create a new Node if self.empty(): # if Tree is empty snake_case_ = new_node # set its root else: # Tree is not empty snake_case_ = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: snake_case_ = new_node # We insert the new node in a leaf break else: snake_case_ = parent_node.left else: if parent_node.right is None: snake_case_ = new_node break else: snake_case_ = parent_node.right snake_case_ = parent_node def lowerCAmelCase ( self : Dict , *UpperCAmelCase_ : Dict ) ->None: """simple docstring""" for value in values: self.__insert(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Tuple ) ->Node | None: """simple docstring""" if self.empty(): raise IndexError("""Warning: Tree is empty! 
please use another.""" ) else: snake_case_ = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: snake_case_ = node.left if value < node.value else node.right return node def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Node | None = None ) ->Node | None: """simple docstring""" if node is None: if self.root is None: return None snake_case_ = self.root if not self.empty(): while node.right is not None: snake_case_ = node.right return node def lowerCAmelCase ( self : str , UpperCAmelCase_ : Node | None = None ) ->Node | None: """simple docstring""" if node is None: snake_case_ = self.root if self.root is None: return None if not self.empty(): snake_case_ = self.root while node.left is not None: snake_case_ = node.left return node def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : int ) ->None: """simple docstring""" snake_case_ = self.search(UpperCAmelCase_ ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(UpperCAmelCase_ , UpperCAmelCase_ ) elif node.left is None: # Has only right children self.__reassign_nodes(UpperCAmelCase_ , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(UpperCAmelCase_ , node.left ) else: snake_case_ = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore snake_case_ = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Node | None ) ->Iterable: """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Dict=None ) ->Any: """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : list , UpperCAmelCase_ : Node | None ) ->None: """simple docstring""" if node: self.inorder(UpperCAmelCase_ , node.left ) arr.append(node.value ) self.inorder(UpperCAmelCase_ , node.right ) def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Node ) ->int: """simple docstring""" snake_case_ = [] self.inorder(UpperCAmelCase_ , UpperCAmelCase_ ) # append all values to list using inorder traversal return arr[k - 1] def _a ( _SCREAMING_SNAKE_CASE ) -> list[Node]: snake_case_ = [] if curr_node is not None: snake_case_ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def _a ( ) -> None: snake_case_ = (8, 3, 6, 1, 10, 14, 13, 4, 7) snake_case_ = BinarySearchTree() for i in testlist: t.insert(_SCREAMING_SNAKE_CASE ) # Prints all the elements of the list in order traversal print(_SCREAMING_SNAKE_CASE ) if t.search(6 ) is not None: print("""The value 6 exists""" ) else: print("""The value 6 doesn't exist""" ) if t.search(-1 ) is not None: print("""The value -1 exists""" ) else: print("""The value -1 doesn't exist""" ) if not t.empty(): print("""Max Value: """ , t.get_max().value ) # type: ignore print("""Min Value: """ , t.get_min().value ) # type: ignore for i in testlist: t.remove(_SCREAMING_SNAKE_CASE ) print(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
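The row above is a style-obfuscated binary search tree (the `snake_case_` / `UpperCAmelCase_` names are the dataset's transformation, so they are left untouched). For reference, a minimal readable sketch of the same insert/search logic; the names Node and BinarySearchTree here are reconstructions for illustration, not part of the dataset row:

# Readable reconstruction of the BST insert/search logic encoded above.
class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

class BinarySearchTree:
    def __init__(self):
        self.root = None

    def insert(self, value):
        new_node = Node(value)
        if self.root is None:
            self.root = new_node
            return
        parent = self.root
        while True:  # walk down until we find a free leaf slot
            if value < parent.value:
                if parent.left is None:
                    parent.left = new_node
                    return
                parent = parent.left
            else:
                if parent.right is None:
                    parent.right = new_node
                    return
                parent = parent.right

    def search(self, value):
        node = self.root
        while node is not None and node.value != value:
            node = node.left if value < node.value else node.right
        return node

tree = BinarySearchTree()
for v in (8, 3, 6, 1, 10):
    tree.insert(v)
assert tree.search(6) is not None and tree.search(-1) is None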
code_codestyle: 347
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
style_context_codestyle: 347
label: 1
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def _a ( _SCREAMING_SNAKE_CASE ) -> float: return np.dot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) class __A : '''simple docstring''' def __init__( self : List[Any] , *, UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ) ->None: """simple docstring""" snake_case_ = regularization snake_case_ = gamma if kernel == "linear": snake_case_ = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("""rbf kernel requires gamma""" ) if not isinstance(self.gamma , (float, int) ): raise ValueError("""gamma must be float or int""" ) if not self.gamma > 0: raise ValueError("""gamma must be > 0""" ) snake_case_ = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: snake_case_ = F"""Unknown kernel: {kernel}""" raise ValueError(UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray ) ->float: """simple docstring""" return np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray ) ->float: """simple docstring""" return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray ) ->None: """simple docstring""" snake_case_ = observations snake_case_ = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((snake_case_) , ) = np.shape(UpperCAmelCase_ ) def to_minimize(UpperCAmelCase_ : ndarray ) -> float: snake_case_ = 0 ((snake_case_) , ) = np.shape(UpperCAmelCase_ ) for i in range(UpperCAmelCase_ ): for j in range(UpperCAmelCase_ ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(UpperCAmelCase_ ) snake_case_ = LinearConstraint(UpperCAmelCase_ , 0 , 0 ) snake_case_ = Bounds(0 , self.regularization ) snake_case_ = minimize( UpperCAmelCase_ , np.ones(UpperCAmelCase_ ) , bounds=UpperCAmelCase_ , constraints=[ly_contraint] ).x snake_case_ = l_star # calculating mean offset of separation plane to points snake_case_ = 0 for i in range(UpperCAmelCase_ ): for j in range(UpperCAmelCase_ ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) snake_case_ = s / n def lowerCAmelCase ( self : str , UpperCAmelCase_ : ndarray ) ->int: """simple docstring""" snake_case_ = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase_ ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 347
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() __SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:]) __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""") __SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
style_context_codestyle: 347
label: 1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : Tuple = { 'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'], 'tokenization_roformer': ['RoFormerTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : int = ['RoFormerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : List[str] = [ 'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoFormerForCausalLM', 'RoFormerForMaskedLM', 'RoFormerForMultipleChoice', 'RoFormerForQuestionAnswering', 'RoFormerForSequenceClassification', 'RoFormerForTokenClassification', 'RoFormerLayer', 'RoFormerModel', 'RoFormerPreTrainedModel', 'load_tf_weights_in_roformer', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Dict = [ 'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRoFormerForCausalLM', 'TFRoFormerForMaskedLM', 'TFRoFormerForMultipleChoice', 'TFRoFormerForQuestionAnswering', 'TFRoFormerForSequenceClassification', 'TFRoFormerForTokenClassification', 'TFRoFormerLayer', 'TFRoFormerModel', 'TFRoFormerPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = [ 'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxRoFormerForMaskedLM', 'FlaxRoFormerForMultipleChoice', 'FlaxRoFormerForQuestionAnswering', 'FlaxRoFormerForSequenceClassification', 'FlaxRoFormerForTokenClassification', 'FlaxRoFormerModel', 'FlaxRoFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, 
FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 347
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
style_context_codestyle: 347
label: 1
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 50_000_000 ) -> int: snake_case_ = set() snake_case_ = int((limit - 24) ** (1 / 2) ) snake_case_ = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , _SCREAMING_SNAKE_CASE ) ) ) for primea in primes: snake_case_ = primea * primea for primea in primes: snake_case_ = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: snake_case_ = primea * primea * primea * primea snake_case_ = square + cube + tetr if total >= limit: break ret.add(_SCREAMING_SNAKE_CASE ) return len(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(f"""{solution() = }""")
code_codestyle: 347
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = rotary_dim snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = None snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = FlaxGPTJModelTester(self ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @tooslow def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = False snake_case_ = model.config.eos_token_id snake_case_ = jax.jit(model.generate ) snake_case_ = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case_ = 
tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @is_pt_flax_cross_test def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ ) snake_case_ = fx_state with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ ) snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ 
).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
style_context_codestyle: 347
label: 1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __A (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Optional[Any] = StableDiffusionInstructPixaPixPipeline __lowercase: Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""} __lowercase: Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowercase: List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowercase: str = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Tuple ) ->int: """simple docstring""" torch.manual_seed(0 ) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) snake_case_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ ) torch.manual_seed(0 ) snake_case_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) snake_case_ = CLIPTextModel(UpperCAmelCase_ ) snake_case_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=0 ) ->List[str]: """simple docstring""" snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case_ = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert("""RGB""" ) if str(UpperCAmelCase_ ).startswith("""mps""" ): snake_case_ = torch.manual_seed(UpperCAmelCase_ ) else: snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) snake_case_ = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """image_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = """cpu""" # ensure 
determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase_ ) snake_case_ = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ ) snake_case_ = sd_pipe(**UpperCAmelCase_ ).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase ( self : Tuple ) ->List[Any]: """simple docstring""" snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase_ ) snake_case_ = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ ) snake_case_ = """french fries""" snake_case_ = sd_pipe(**UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ ) snake_case_ = output.images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase ( self : str ) ->List[Any]: """simple docstring""" snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase_ ) snake_case_ = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ ) snake_case_ = [inputs["""prompt"""]] * 2 snake_case_ = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0 snake_case_ = torch.from_numpy(UpperCAmelCase_ ).unsqueeze(0 ).to(UpperCAmelCase_ ) snake_case_ = image / 2 + 0.5 snake_case_ = image.permute(0 , 3 , 1 , 2 ) snake_case_ = image.repeat(2 , 1 , 1 , 1 ) snake_case_ = sd_pipe(**UpperCAmelCase_ ).images snake_case_ = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) snake_case_ = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase ( self : str ) ->Union[str, Any]: """simple docstring""" snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = EulerAncestralDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" ) snake_case_ = StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase_ ) snake_case_ = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ ) snake_case_ = sd_pipe(**UpperCAmelCase_ ).images snake_case_ = image[0, -3:, -3:, -1] snake_case_ = [round(UpperCAmelCase_ , 4 ) for x in image_slice.flatten().tolist()] print(""",""".join([str(UpperCAmelCase_ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) snake_case_ = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" 
super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = self.get_dummy_components() snake_case_ = StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase_ ) snake_case_ = VaeImageProcessor(do_resize=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ ) snake_case_ = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) snake_case_ = pipe(**self.get_dummy_inputs_by_type(UpperCAmelCase_ , input_image_type="""pt""" ) )[0] snake_case_ = components["""vae"""] snake_case_ = self.get_dummy_inputs_by_type(UpperCAmelCase_ , input_image_type="""pt""" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): snake_case_ = vae.encode(inputs[image_param] ).latent_dist.mode() snake_case_ = pipe(**UpperCAmelCase_ )[0] snake_case_ = np.abs(out - out_latents_inputs ).max() self.assertLess(UpperCAmelCase_ , 1E-4 , """passing latents as image input generate different result from passing image""" ) @slow @require_torch_gpu class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str=0 ) ->List[Any]: """simple docstring""" snake_case_ = torch.manual_seed(UpperCAmelCase_ ) snake_case_ = load_image( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" ) snake_case_ = { """prompt""": """turn him into a cyborg""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """image_guidance_scale""": 1.0, """output_type""": """numpy""", } return inputs def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=UpperCAmelCase_ ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing() snake_case_ = self.get_inputs() snake_case_ = pipe(**UpperCAmelCase_ ).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case_ = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def lowerCAmelCase ( self : Dict ) ->Union[str, Any]: """simple docstring""" snake_case_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=UpperCAmelCase_ ) snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing() snake_case_ = self.get_inputs() snake_case_ = pipe(**UpperCAmelCase_ ).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case_ = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def lowerCAmelCase ( self : Dict ) ->List[Any]: """simple docstring""" snake_case_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=UpperCAmelCase_ ) snake_case_ = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) 
pipe.enable_attention_slicing() snake_case_ = self.get_inputs() snake_case_ = pipe(**UpperCAmelCase_ ).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case_ = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = 0 def callback_fn(UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : torch.FloatTensor ) -> None: snake_case_ = True nonlocal number_of_steps number_of_steps += 1 if step == 1: snake_case_ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) snake_case_ = latents[0, -3:, -3:, -1] snake_case_ = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: snake_case_ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) snake_case_ = latents[0, -3:, -3:, -1] snake_case_ = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 snake_case_ = False snake_case_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=UpperCAmelCase_ , torch_dtype=torch.floataa ) snake_case_ = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing() snake_case_ = self.get_inputs() pipe(**UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=UpperCAmelCase_ , torch_dtype=torch.floataa ) snake_case_ = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case_ = self.get_inputs() snake_case_ = pipe(**UpperCAmelCase_ ) snake_case_ = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def lowerCAmelCase ( self : List[Any] ) ->int: """simple docstring""" snake_case_ = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 snake_case_ = inputs["""image"""].resize((504, 504) ) snake_case_ = """timbrooks/instruct-pix2pix""" snake_case_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing() snake_case_ = pipe(**UpperCAmelCase_ ) snake_case_ = output.images[0] snake_case_ = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) snake_case_ = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
code_codestyle: 347
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
"""simple docstring""" from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> list[float]: snake_case_ , snake_case_ = coefficient_matrix.shape snake_case_ , snake_case_ = constant_matrix.shape if rowsa != colsa: snake_case_ = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}""" raise ValueError(_SCREAMING_SNAKE_CASE ) if colsa != 1: snake_case_ = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}""" raise ValueError(_SCREAMING_SNAKE_CASE ) if rowsa != rowsa: snake_case_ = ( """Coefficient and constant matrices dimensions must be nxn and nx1 but """ f"""received {rowsa}x{colsa} and {rowsa}x{colsa}""" ) raise ValueError(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) != rowsa: snake_case_ = ( """Number of initial values must be equal to number of rows in coefficient """ f"""matrix but received {len(_SCREAMING_SNAKE_CASE )} and {rowsa}""" ) raise ValueError(_SCREAMING_SNAKE_CASE ) if iterations <= 0: raise ValueError("""Iterations must be at least 1""" ) snake_case_ = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) snake_case_ , snake_case_ = table.shape strictly_diagonally_dominant(_SCREAMING_SNAKE_CASE ) # Iterates the whole matrix for given number of times for _ in range(_SCREAMING_SNAKE_CASE ): snake_case_ = [] for row in range(_SCREAMING_SNAKE_CASE ): snake_case_ = 0 for col in range(_SCREAMING_SNAKE_CASE ): if col == row: snake_case_ = table[row][col] elif col == cols - 1: snake_case_ = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] snake_case_ = (temp + val) / denom new_val.append(_SCREAMING_SNAKE_CASE ) snake_case_ = new_val return [float(_SCREAMING_SNAKE_CASE ) for i in new_val] def _a ( _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ , snake_case_ = table.shape snake_case_ = True for i in range(0 , _SCREAMING_SNAKE_CASE ): snake_case_ = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = """ylacombe/bark-small""" snake_case_ = tempfile.mkdtemp() snake_case_ = """en_speaker_1""" snake_case_ = """This is a test string""" snake_case_ = """speaker_embeddings_path.json""" snake_case_ = """speaker_embeddings""" def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case_ = 35 snake_case_ = 2 snake_case_ = 8 snake_case_ = { """semantic_prompt""": np.ones(UpperCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case_ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string ) snake_case_ = tokenizer( self.input_string , padding="""max_length""" 
, max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
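
# Editor's note: a hedged sketch of the processor API under test, via the real
# transformers `BarkProcessor`. The checkpoint and voice preset below are the
# commonly used public ones, not taken from this test file.
if __name__ == "__main__":
    from transformers import BarkProcessor

    bark_processor = BarkProcessor.from_pretrained("suno/bark-small")
    encoded = bark_processor(text="This is a test string", voice_preset="v2/en_speaker_1")
    # The tokenizer side contributes input_ids/attention_mask; passing a voice
    # preset additionally attaches a "history_prompt" entry.
    print(sorted(encoded.keys()))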
"""simple docstring""" import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = KandinskyVaaPriorPipeline __lowercase: Union[str, Any] = ["""prompt"""] __lowercase: Dict = ["""prompt""", """negative_prompt"""] __lowercase: Dict = [ """num_images_per_prompt""", """generator""", """num_inference_steps""", """latents""", """negative_prompt""", """guidance_scale""", """output_type""", """return_dict""", ] __lowercase: List[str] = False @property def lowerCAmelCase ( self : Any ) ->Optional[Any]: """simple docstring""" return 32 @property def lowerCAmelCase ( self : Union[str, Any] ) ->Any: """simple docstring""" return 32 @property def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" return self.time_input_dim @property def lowerCAmelCase ( self : List[Any] ) ->List[Any]: """simple docstring""" return self.time_input_dim * 4 @property def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" return 100 @property def lowerCAmelCase ( self : Optional[Any] ) ->List[str]: """simple docstring""" snake_case_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCAmelCase ( self : Union[str, Any] ) ->str: """simple docstring""" torch.manual_seed(0 ) snake_case_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(UpperCAmelCase_ ) @property def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" torch.manual_seed(0 ) snake_case_ = { """num_attention_heads""": 2, """attention_head_dim""": 12, """embedding_dim""": self.text_embedder_hidden_size, """num_layers""": 1, } snake_case_ = PriorTransformer(**UpperCAmelCase_ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 snake_case_ = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def lowerCAmelCase ( self : List[Any] ) ->List[str]: """simple docstring""" torch.manual_seed(0 ) snake_case_ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) snake_case_ = CLIPVisionModelWithProjection(UpperCAmelCase_ ) return model @property def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]: """simple docstring""" snake_case_ = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_resize=UpperCAmelCase_ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , ) return image_processor def lowerCAmelCase ( self : Optional[int] ) ->Tuple: 
"""simple docstring""" snake_case_ = self.dummy_prior snake_case_ = self.dummy_image_encoder snake_case_ = self.dummy_text_encoder snake_case_ = self.dummy_tokenizer snake_case_ = self.dummy_image_processor snake_case_ = UnCLIPScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_000 , clip_sample=UpperCAmelCase_ , clip_sample_range=10.0 , ) snake_case_ = { """prior""": prior, """image_encoder""": image_encoder, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """scheduler""": scheduler, """image_processor""": image_processor, } return components def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str]=0 ) ->Tuple: """simple docstring""" if str(UpperCAmelCase_ ).startswith("""mps""" ): snake_case_ = torch.manual_seed(UpperCAmelCase_ ) else: snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) snake_case_ = { """prompt""": """horse""", """generator""": generator, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = """cpu""" snake_case_ = self.get_dummy_components() snake_case_ = self.pipeline_class(**UpperCAmelCase_ ) snake_case_ = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) snake_case_ = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) ) snake_case_ = output.image_embeds snake_case_ = pipe( **self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0] snake_case_ = image[0, -10:] snake_case_ = image_from_tuple[0, -10:] assert image.shape == (1, 32) snake_case_ = np.array( [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = torch_device == """cpu""" snake_case_ = True snake_case_ = False self._test_inference_batch_single_identical( test_max_difference=UpperCAmelCase_ , relax_max_difference=UpperCAmelCase_ , test_mean_pixel_difference=UpperCAmelCase_ , ) @skip_mps def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = torch_device == """cpu""" snake_case_ = False self._test_attention_slicing_forward_pass( test_max_difference=UpperCAmelCase_ , test_mean_pixel_difference=UpperCAmelCase_ , )
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
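
# Editor's note: a minimal sketch of the API these tests exercise, using the
# real transformers `HfArgumentParser`; the dataclass and its fields below are
# illustrative, not taken from the test file.
if __name__ == "__main__":
    from dataclasses import dataclass

    from transformers import HfArgumentParser

    @dataclass
    class DemoArgs:
        foo: int = 42
        baz: str = "toto"

    demo_parser = HfArgumentParser(DemoArgs)
    # CLI-style strings are converted back into a typed dataclass instance
    (demo_args,) = demo_parser.parse_args_into_dataclasses(["--foo", "7"])
    print(demo_args)  # DemoArgs(foo=7, baz='toto')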
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , 
outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
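
# Editor's note: an example invocation of the conversion CLI defined above; the
# script filename and output path are assumptions for illustration only:
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224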
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
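
# Editor's note: a hedged end-to-end sketch of how these helpers are driven in
# accelerate, where the public entry point is `load_and_quantize_model`
# together with `BnbQuantizationConfig` (the top-level `_a` above corresponds
# to it). The checkpoint path below is hypothetical.
if __name__ == "__main__":
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
    from transformers import AutoConfig, AutoModelForCausalLM

    with init_empty_weights():  # build the skeleton on the meta device, no RAM used for weights
        empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))

    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    quantized = load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location="path/to/gpt2/checkpoint",  # hypothetical local weights folder
        device_map="auto",
    )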
"""simple docstring""" from maths.prime_check import is_prime def _a ( _SCREAMING_SNAKE_CASE ) -> int: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = f"""Input value of [number={number}] must be an integer""" raise TypeError(_SCREAMING_SNAKE_CASE ) if is_prime(_SCREAMING_SNAKE_CASE ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
"""simple docstring""" __SCREAMING_SNAKE_CASE : int = 'Alexander Joslin' import operator as op from .stack import Stack def _a ( _SCREAMING_SNAKE_CASE ) -> int: snake_case_ = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub} snake_case_ = Stack() snake_case_ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(_SCREAMING_SNAKE_CASE ) ) elif i in operators: # RULE 2 operator_stack.push(_SCREAMING_SNAKE_CASE ) elif i == ")": # RULE 4 snake_case_ = operator_stack.peek() operator_stack.pop() snake_case_ = operand_stack.peek() operand_stack.pop() snake_case_ = operand_stack.peek() operand_stack.pop() snake_case_ = operators[opr](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) operand_stack.push(_SCREAMING_SNAKE_CASE ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = '(5 + ((4 * 2) * (2 + 3)))' # answer = 45 print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
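# Usage sketch (an addition, not part of the file above): the class mirrors
# transformers.T5Tokenizer, so the equivalent upstream API can be exercised
# directly. Assumes `transformers` and `sentencepiece` are installed and that
# the "t5-small" checkpoint (listed in PRETRAINED_VOCAB_FILES_MAP above) is
# reachable.
from transformers import T5Tokenizer

t5_tokenizer = T5Tokenizer.from_pretrained("t5-small")
encoded = t5_tokenizer("Studies have shown that owning a dog is good for you")
# build_inputs_with_special_tokens appends a single </s> (eos) to each sequence
assert encoded["input_ids"][-1] == t5_tokenizer.eos_token_id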
347
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class __A (snake_case__): '''simple docstring''' __lowercase: Tuple = """yolos""" def __init__( self : str , UpperCAmelCase_ : int=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Optional[int]=1E-12 , UpperCAmelCase_ : List[str]=[512, 864] , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[Any]=100 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Any=0.1 , **UpperCAmelCase_ : Tuple , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = qkv_bias snake_case_ = num_detection_tokens snake_case_ = use_mid_position_embeddings snake_case_ = auxiliary_loss # Hungarian matcher snake_case_ = class_cost snake_case_ = bbox_cost snake_case_ = giou_cost # Loss coefficients snake_case_ = bbox_loss_coefficient snake_case_ = giou_loss_coefficient snake_case_ = eos_coefficient class __A (snake_case__): '''simple docstring''' __lowercase: Dict = version.parse("""1.11""") @property def lowerCAmelCase ( self : str ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Optional[int] ) ->float: """simple docstring""" return 1E-4 @property def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" return 12
347
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int: snake_case_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
347
1
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
347
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
1
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" debug_launcher(test_script.main ) def lowerCAmelCase ( self : Any ) ->int: """simple docstring""" debug_launcher(test_ops.main )
347
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
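# Hypothetical invocation (an addition; the script filename is an assumption,
# but every flag below comes from the argparse definitions above):
#
#   python run_mlm_tpu.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm-checkpoints \
#       --bfloat16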
347
1
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> "list[int]": if upper_limit < 0: raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" ) snake_case_ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 snake_case_ = 1 if upper_limit > 0: snake_case_ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(_SCREAMING_SNAKE_CASE ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print('\n********* Catalan Numbers Using Dynamic Programming ************\n') print('\n*** Enter -1 at any time to quit ***') print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='') try: while True: __SCREAMING_SNAKE_CASE : List[str] = int(input().strip()) if N < 0: print('\n********* Goodbye!! ************') break else: print(f"""The Catalan numbers from 0 through {N} are:""") print(catalan_numbers(N)) print('Try another upper limit for the sequence: ', end='') except (NameError, ValueError): print('\n********* Invalid input, goodbye! ************\n') import doctest doctest.testmod()
347
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float: if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
347
1
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __SCREAMING_SNAKE_CASE : int = os.path.join(git_repo_path, 'src', 'transformers') __SCREAMING_SNAKE_CASE : List[Any] = '\n{0} = None\n' __SCREAMING_SNAKE_CASE : Union[str, Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n' __SCREAMING_SNAKE_CASE : List[Any] = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : List[Any] ) ->Tuple: """simple docstring""" snake_case_ = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" ) self.assertIsNone(UpperCAmelCase_ ) snake_case_ = find_backend(""" if not is_tokenizers_available():""" ) self.assertEqual(UpperCAmelCase_ , """tokenizers""" ) snake_case_ = find_backend(""" if not is_tensorflow_text_available():""" ) self.assertEqual(UpperCAmelCase_ , """tensorflow_text""" ) snake_case_ = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" ) self.assertEqual(UpperCAmelCase_ , """sentencepiece_and_tokenizers""" ) snake_case_ = find_backend( """ if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" ) self.assertEqual(UpperCAmelCase_ , """sentencepiece_and_tensorflow_text""" ) snake_case_ = find_backend( """ if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" ) self.assertEqual(UpperCAmelCase_ , """sentencepiece_and_tokenizers_and_vision""" ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""" , UpperCAmelCase_ ) self.assertIn("""tensorflow_text""" , UpperCAmelCase_ ) self.assertIn("""sentencepiece_and_tokenizers""" , UpperCAmelCase_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""BertModel""" , objects["""torch"""] ) self.assertIn("""TFBertModel""" , objects["""tf"""] ) self.assertIn("""FlaxBertModel""" , objects["""flax"""] ) self.assertIn("""BertModel""" , objects["""torch"""] ) self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] ) self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] ) def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = create_dummy_object("""CONSTANT""" , """'torch'""" ) self.assertEqual(UpperCAmelCase_ , """\nCONSTANT = None\n""" ) snake_case_ = create_dummy_object("""function""" , """'torch'""" ) self.assertEqual( UpperCAmelCase_ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) snake_case_ = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') """ snake_case_ = create_dummy_object("""FakeClass""" , """'torch'""" ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = """# This file is autogenerated by the command `make 
fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) """ snake_case_ = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""] , UpperCAmelCase_ )
347
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> bool: if num < 0: return False snake_case_ = num snake_case_ = 0 while num > 0: snake_case_ = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
1
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = SpeechTaTokenizer __lowercase: int = False __lowercase: List[str] = True def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ ) snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) snake_case_ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = """this is a test""" snake_case_ = """this is a test""" return input_text, output_text def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = """<pad>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCAmelCase_ ) , 81 ) def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size + 
len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off snake_case_ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
347
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = SpeechTaTokenizer __lowercase: int = False __lowercase: List[str] = True def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ ) snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) snake_case_ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = """this is a test""" snake_case_ = """this is a test""" return input_text, output_text def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = """<pad>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCAmelCase_ ) , 81 ) def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size + 
len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off snake_case_ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True ) -> Dict: print(f"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": snake_case_ = timm.create_model("""levit_128s""" , pretrained=_SCREAMING_SNAKE_CASE ) else: snake_case_ = timm.create_model("""levit_128""" , pretrained=_SCREAMING_SNAKE_CASE ) if hidden_sizes == 192: snake_case_ = timm.create_model("""levit_192""" , pretrained=_SCREAMING_SNAKE_CASE ) if hidden_sizes == 256: snake_case_ = timm.create_model("""levit_256""" , pretrained=_SCREAMING_SNAKE_CASE ) if hidden_sizes == 384: snake_case_ = timm.create_model("""levit_384""" , pretrained=_SCREAMING_SNAKE_CASE ) from_model.eval() snake_case_ = LevitForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval() snake_case_ = OrderedDict() snake_case_ = from_model.state_dict() snake_case_ = list(from_model.state_dict().keys() ) snake_case_ = list(our_model.state_dict().keys() ) print(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): snake_case_ = weights[og_keys[i]] our_model.load_state_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = torch.randn((2, 3, 224, 224) ) snake_case_ = from_model(_SCREAMING_SNAKE_CASE ) snake_case_ = our_model(_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "The model logits don't match the original one." 
snake_case_ = name print(_SCREAMING_SNAKE_CASE ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) snake_case_ = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f"""Pushed {checkpoint_name}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True ) -> Optional[int]: snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = 1_000 snake_case_ = (1, num_labels) snake_case_ = """huggingface/label-files""" snake_case_ = num_labels snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = partial(_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE ) snake_case_ = { """levit-128S""": 128, """levit-128""": 128, """levit-192""": 192, """levit-256""": 256, """levit-384""": 384, } snake_case_ = { """levit-128S""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-128""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-192""": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-256""": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-384""": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , _SCREAMING_SNAKE_CASE , names_to_config[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return config, expected_shape if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) __SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() __SCREAMING_SNAKE_CASE : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
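# --- Illustrative usage sketch (not part of the original script) ---
# A minimal, hedged example of driving the converter programmatically. It
# assumes the functions carry the names used in the `__main__` block above
# (`convert_weights_and_push`; in this dump the definitions appear as `_a`)
# and that timm can download the LeViT weights. CLI equivalent:
#   python convert_levit.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/
# (the script filename is an assumption).
from pathlib import Path

dump_dir = Path("levit-dump-folder/")
dump_dir.mkdir(exist_ok=True, parents=True)
# Convert a single checkpoint locally without pushing to the hub.
convert_weights_and_push(dump_dir, "levit-128S", False)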
"""simple docstring""" import datasets __SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' __SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' __SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int: """simple docstring""" return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 2_000_000 ) -> int: snake_case_ = [0 for i in range(n + 1 )] snake_case_ = 1 snake_case_ = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , _SCREAMING_SNAKE_CASE ): snake_case_ = 1 snake_case_ = 0 for i in range(_SCREAMING_SNAKE_CASE ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(f"""{solution() = }""")
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
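# --- Illustrative sketch (not part of the original module) ---
# The classes above implement transformers' dummy-backend pattern: importing a
# sentencepiece-backed class always succeeds, and `requires_backends` raises a
# helpful ImportError only when the class is instantiated without the backend.
# A simplified, self-contained version of the idea (names are illustrative and
# this is not the exact transformers implementation):
class _RequiresSentencePiece:
    def __init__(self, *args, **kwargs):
        raise ImportError(
            "This class requires the `sentencepiece` library. "
            "Install it with `pip install sentencepiece`."
        )


try:
    _RequiresSentencePiece()  # importing was harmless; only instantiation fails
except ImportError as err:
    print(err)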
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class __A : '''simple docstring''' __lowercase: int = None def lowerCAmelCase ( self : Tuple ) ->str: """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_dict ) snake_case_ = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Tuple: """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = os.path.join(UpperCAmelCase_ , """feat_extract.json""" ) feat_extract_first.to_json_file(UpperCAmelCase_ ) snake_case_ = self.feature_extraction_class.from_json_file(UpperCAmelCase_ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = feat_extract_first.save_pretrained(UpperCAmelCase_ )[0] check_json_file_has_correct_format(UpperCAmelCase_ ) snake_case_ = self.feature_extraction_class.from_pretrained(UpperCAmelCase_ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = self.feature_extraction_class() self.assertIsNotNone(UpperCAmelCase_ )
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None: """simple docstring""" warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __A : '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Optional[Any]=10 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Tuple=32 * 4 , UpperCAmelCase_ : List[Any]=32 * 6 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Any=32 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = is_training snake_case_ = use_auxiliary_loss snake_case_ = num_queries snake_case_ = num_channels snake_case_ = min_size snake_case_ = max_size snake_case_ = num_labels snake_case_ = mask_feature_size def lowerCAmelCase ( self : Optional[Any] ) ->int: """simple docstring""" snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( UpperCAmelCase_ ) snake_case_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase_ ) snake_case_ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase_ ) > 0.5 ).float() snake_case_ = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase_ ) > 0.5).long() snake_case_ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowerCAmelCase ( self : str ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs() snake_case_ = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict ) ->Optional[Any]: """simple docstring""" snake_case_ = output.encoder_hidden_states snake_case_ = output.pixel_decoder_hidden_states snake_case_ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(UpperCAmelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCAmelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCAmelCase_ ) , config.decoder_config.decoder_layers ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , 
UpperCAmelCase_ : str=False ) ->List[Any]: """simple docstring""" with torch.no_grad(): snake_case_ = MaskFormerModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() snake_case_ = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] ) ->str: """simple docstring""" snake_case_ = MaskFormerForInstanceSegmentation(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() def comm_check_on_output(UpperCAmelCase_ : Union[str, Any] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): snake_case_ = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ ) comm_check_on_output(UpperCAmelCase_ ) snake_case_ = model( pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ ) comm_check_on_output(UpperCAmelCase_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __lowercase: Optional[int] = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __lowercase: Union[str, Any] = False __lowercase: List[str] = False __lowercase: str = False __lowercase: List[Any] = False def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" snake_case_ = MaskFormerModelTester(self ) snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : Union[str, Any] ) ->Any: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ 
) def lowerCAmelCase ( self : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase_ ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def lowerCAmelCase ( self : Tuple ) ->Optional[int]: """simple docstring""" pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def lowerCAmelCase ( self : Union[str, Any] ) ->str: """simple docstring""" pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def lowerCAmelCase ( self : Tuple ) ->Optional[Any]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" pass def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(UpperCAmelCase_ ) snake_case_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) @slow def lowerCAmelCase ( self : Dict ) ->Optional[int]: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: snake_case_ = MaskFormerModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def lowerCAmelCase ( self : str ) ->Union[str, Any]: """simple docstring""" snake_case_ = (self.model_tester.min_size,) * 2 snake_case_ = { """pixel_values""": torch.randn((2, 3, *size) , device=UpperCAmelCase_ ), """mask_labels""": torch.randn((2, 10, *size) , device=UpperCAmelCase_ ), """class_labels""": torch.zeros(2 , 10 , device=UpperCAmelCase_ ).long(), } snake_case_ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCAmelCase_ ) snake_case_ = model(**UpperCAmelCase_ ) self.assertTrue(outputs.loss is not None ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(UpperCAmelCase_ ).to(UpperCAmelCase_ ) snake_case_ = model(**UpperCAmelCase_ , output_attentions=UpperCAmelCase_ ) self.assertTrue(outputs.attentions is not None ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[int]: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss snake_case_ = self.all_model_classes[1] snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() 
snake_case_ = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.train() snake_case_ = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ ).loss loss.backward() def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ = self.all_model_classes[1] snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() snake_case_ = True snake_case_ = True snake_case_ = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.train() snake_case_ = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ ) snake_case_ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() snake_case_ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't snake_case_ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() snake_case_ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=UpperCAmelCase_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __SCREAMING_SNAKE_CASE : Tuple = 1E-4 def _a ( ) -> Optional[Any]: snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class __A (unittest.TestCase): '''simple docstring''' @cached_property def lowerCAmelCase ( self : Any ) ->Tuple: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCAmelCase_ ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ ) snake_case_ = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase_ ) snake_case_ = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(UpperCAmelCase_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) snake_case_ = torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(UpperCAmelCase_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) snake_case_ = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(UpperCAmelCase_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(UpperCAmelCase_ ) .eval() ) snake_case_ = self.default_image_processor snake_case_ = 
prepare_img() snake_case_ = image_processor(UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ ) snake_case_ = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase_ ) # masks_queries_logits snake_case_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) snake_case_ = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] snake_case_ = torch.tensor(UpperCAmelCase_ ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) # class_queries_logits snake_case_ = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) snake_case_ = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, -7.76_30E00, -5.12_63E00], ] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : Tuple ) ->Optional[Any]: """simple docstring""" snake_case_ = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(UpperCAmelCase_ ) .eval() ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ ) snake_case_ = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase_ ) # masks_queries_logits snake_case_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) snake_case_ = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] snake_case_ = torch.tensor(UpperCAmelCase_ ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) # class_queries_logits snake_case_ = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) snake_case_ = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : Dict ) ->Union[str, Any]: """simple docstring""" snake_case_ = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(UpperCAmelCase_ ) .eval() ) snake_case_ = self.default_image_processor snake_case_ = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , ) snake_case_ = 
inputs["""pixel_values"""].to(UpperCAmelCase_ ) snake_case_ = [el.to(UpperCAmelCase_ ) for el in inputs["""mask_labels"""]] snake_case_ = [el.to(UpperCAmelCase_ ) for el in inputs["""class_labels"""]] with torch.no_grad(): snake_case_ = model(**UpperCAmelCase_ ) self.assertTrue(outputs.loss is not None )
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , 
outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
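# --- Illustrative usage sketch (not part of the original script) ---
# Programmatic equivalent of the CLI entry point above, assuming the function
# carries the name used there (`convert_vit_checkpoint`; defined as `_a` in
# this dump) and that timm can fetch the weights. CLI equivalent:
#   python convert_vit.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path vit-dump/
# (the script filename is an assumption).
convert_vit_checkpoint("vit_base_patch16_224", "vit-dump/")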
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> bool: if num < 0: return False snake_case_ = num snake_case_ = 0 while num > 0: snake_case_ = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = True __lowercase: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = 50_000 snake_case_ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
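# Usage sketch for the quantization helpers above. Every function in this file is
# obfuscated to `_a`, so the public names below (`BnbQuantizationConfig` and
# `load_and_quantize_model` from accelerate, the latter referenced in the error
# message above) are assumptions; treat this as a hedged sketch, not the canonical API.
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     with init_empty_weights():
#         empty_model = MyModel(config)          # hypothetical model class
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)  # or load_in_4bit=True
#     model = load_and_quantize_model(
#         empty_model,
#         bnb_quantization_config=bnb_config,
#         weights_location="path/to/checkpoint",  # placeholder path
#         device_map="auto",
#     )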
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern snake_case_ , snake_case_ = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case_ = failure[j - 1] continue i += 1 return False def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = [0] snake_case_ = 0 snake_case_ = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case_ = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) __SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12' __SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __SCREAMING_SNAKE_CASE : int = 'ABABX' __SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __SCREAMING_SNAKE_CASE : Any = 'AAAB' __SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy' __SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __SCREAMING_SNAKE_CASE : Any = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class __A : '''simple docstring''' def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=19 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Tuple=37 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[Any]=None , ) ->Optional[Any]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Dict ) ->List[str]: """simple docstring""" snake_case_ = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=UpperCAmelCase_ , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , ) return config def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , 
UpperCAmelCase_ : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = EsmForProteinFolding(config=UpperCAmelCase_ ).float() model.to(UpperCAmelCase_ ) model.eval() snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = False __lowercase: Optional[int] = (EsmForProteinFolding,) if is_torch_available() else () __lowercase: List[Any] = () __lowercase: Union[str, Any] = {} if is_torch_available() else {} __lowercase: List[Any] = False def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = EsmFoldModelTester(self ) snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) @unittest.skip("""Does not support attention outputs""" ) def lowerCAmelCase ( self : int ) ->Optional[Any]: """simple docstring""" pass @unittest.skip def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : Union[str, Any] ) ->Any: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : Any ) ->Tuple: """simple docstring""" pass @unittest.skip("""ESMFold does not support passing input embeds!""" ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def lowerCAmelCase ( self : Optional[int] ) ->Any: """simple docstring""" pass @unittest.skip("""ESMFold does not output hidden states in the normal way.""" ) def lowerCAmelCase ( self : int ) ->Dict: """simple docstring""" pass @unittest.skip("""ESMfold does not output hidden states in the normal way.""" ) def lowerCAmelCase ( self : List[Any] ) ->List[Any]: """simple docstring""" pass @unittest.skip("""ESMFold only has one output format.""" ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" pass @unittest.skip("""This test doesn't 
work for ESMFold and doesn't test core functionality""" ) def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" pass @unittest.skip("""ESMFold does not support input chunking.""" ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" pass @unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" ) def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" pass @unittest.skip("""ESMFold doesn't support torchscript compilation.""" ) def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" pass @unittest.skip("""ESMFold doesn't support torchscript compilation.""" ) def lowerCAmelCase ( self : Tuple ) ->Any: """simple docstring""" pass @unittest.skip("""ESMFold doesn't support torchscript compilation.""" ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" pass @unittest.skip("""ESMFold doesn't support data parallel.""" ) def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]: """simple docstring""" pass @require_torch class __A (snake_case__): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Tuple: """simple docstring""" snake_case_ = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float() model.eval() snake_case_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) snake_case_ = model(UpperCAmelCase_ )["""positions"""] snake_case_ = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , UpperCAmelCase_ , atol=1E-4 ) )
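# Note on the shape assertions above (a hedged interpretation): ESMFold returns
# `positions` of shape (8, batch, seq_len, 14, 3) -- one output per structure-module
# block, in the atom14 representation (14 heavy-atom slots per residue, xyz
# coordinates) -- and `angles` of shape (8, batch, seq_len, 7, 2), i.e. seven
# torsion angles per residue encoded as (sin, cos) pairs.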
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
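# Note on the preprocessing above: copies of `outputs.input_ids` are used as labels,
# with every pad token replaced by -100 -- the index that PyTorch's cross-entropy
# loss ignores -- so padding positions do not contribute to the training loss.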
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) @dataclass class __A (snake_case__): '''simple docstring''' __lowercase: Any = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self : Optional[int] , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: snake_case_ = deprecated_arg[3:] setattr(self , UpperCAmelCase_ , not kwargs.pop(UpperCAmelCase_ ) ) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) snake_case_ = kwargs.pop("""torchscript""" , self.torchscript ) snake_case_ = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) snake_case_ = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**UpperCAmelCase_ ) __lowercase: bool = field(default=snake_case__ , metadata={"""help""": """Trace the models using torchscript"""}) __lowercase: bool = field(default=snake_case__ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""}) __lowercase: str = field( default="""O1""" , metadata={ """help""": ( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """ """See details at https://nvidia.github.io/apex/amp.html""" ) } , ) @cached_property def lowerCAmelCase ( self : Dict ) ->Tuple["torch.device", int]: """simple docstring""" requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: snake_case_ = torch.device("""cpu""" ) snake_case_ = 0 elif is_torch_tpu_available(): snake_case_ = xm.xla_device() snake_case_ = 0 else: snake_case_ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) snake_case_ = torch.cuda.device_count() return device, n_gpu @property def lowerCAmelCase ( self : str ) ->Tuple: """simple docstring""" return is_torch_tpu_available() and self.tpu @property def lowerCAmelCase ( self : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def lowerCAmelCase ( self : List[str] ) ->"torch.device": """simple docstring""" requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def lowerCAmelCase ( self : Tuple ) ->int: """simple docstring""" requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]: """simple docstring""" return self.n_gpu > 0
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() __SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:]) __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""") __SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> int: if not numbers: return 0 if not isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) or not all( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for number in numbers ): raise ValueError("""numbers must be an iterable of integers""" ) snake_case_ = snake_case_ = snake_case_ = numbers[0] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): # update the maximum and minimum subarray products snake_case_ = numbers[i] if number < 0: snake_case_ , snake_case_ = min_till_now, max_till_now snake_case_ = max(_SCREAMING_SNAKE_CASE , max_till_now * number ) snake_case_ = min(_SCREAMING_SNAKE_CASE , min_till_now * number ) # update the maximum product found till now snake_case_ = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return max_prod
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
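# Example invocation (illustrative: the script name and all paths are placeholders;
# only the flags come from the argparse definition above):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned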
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: str = BlenderbotSmallTokenizer __lowercase: int = False def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" super().setUp() snake_case_ = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""] snake_case_ = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) snake_case_ = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""] snake_case_ = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""} snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) def lowerCAmelCase ( self : Any , **UpperCAmelCase_ : List[str] ) ->Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = """adapt act apte""" snake_case_ = """adapt act apte""" return input_text, output_text def lowerCAmelCase ( self : Tuple ) ->Optional[int]: """simple docstring""" snake_case_ = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ = """adapt act apte""" snake_case_ = ["""adapt""", """act""", """ap@@""", """te"""] snake_case_ = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] snake_case_ = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) assert tok("""sam""" ).input_ids == [1_384] snake_case_ = """I am a small frog.""" snake_case_ = tok([src_text] , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )["""input_ids"""] snake_case_ = tok.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def lowerCAmelCase ( self : Any ) ->int: """simple docstring""" snake_case_ = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) snake_case_ = """I am a small frog .""" snake_case_ = """.""" snake_case_ = tok(UpperCAmelCase_ )["""input_ids"""] snake_case_ = tok(UpperCAmelCase_ )["""input_ids"""] assert encoded[-1] == encoded_dot[0]
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = rotary_dim snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = None snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = FlaxGPTJModelTester(self ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @tooslow def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = False snake_case_ = model.config.eos_token_id snake_case_ = jax.jit(model.generate ) snake_case_ = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case_ = 
tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @is_pt_flax_cross_test def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ ) snake_case_ = fx_state with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ ) snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ 
).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
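# Note on the cache checks above: `check_use_cache_forward` runs the model on all
# tokens but the last with a freshly initialized cache, feeds the final token with
# the returned `past_key_values`, and asserts that the last-position logits match a
# plain full forward pass within 1e-3 -- i.e. incremental decoding stays consistent.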
"""simple docstring""" from maths.prime_factors import prime_factors def _a ( _SCREAMING_SNAKE_CASE ) -> int: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = f"""Input value of [number={number}] must be an integer""" raise TypeError(_SCREAMING_SNAKE_CASE ) if number < 1: raise ValueError("""Input must be a positive integer""" ) return -1 if len(prime_factors(_SCREAMING_SNAKE_CASE ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : int=13 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[Any]=99 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Tuple=37 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : List[Any]=None , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = 13 snake_case_ = 7 snake_case_ = True snake_case_ = True snake_case_ = True snake_case_ = True snake_case_ = 99 snake_case_ = 384 snake_case_ = 2 snake_case_ = 4 snake_case_ = 37 snake_case_ = """gelu""" snake_case_ = 0.1 snake_case_ = 0.1 snake_case_ = 512 snake_case_ = 16 snake_case_ = 2 snake_case_ = 0.02 snake_case_ = 3 snake_case_ = 4 snake_case_ = 128 snake_case_ = 2 snake_case_ = 9 snake_case_ = 1 snake_case_ = None def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ) ->Any: """simple docstring""" snake_case_ = 
TFConvBertModel(config=UpperCAmelCase_ ) snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} snake_case_ = [input_ids, input_mask] snake_case_ = model(UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ) ->Tuple: """simple docstring""" snake_case_ = TFConvBertForMaskedLM(config=UpperCAmelCase_ ) snake_case_ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ) ->Union[str, Any]: """simple docstring""" snake_case_ = self.num_labels snake_case_ = TFConvBertForSequenceClassification(config=UpperCAmelCase_ ) snake_case_ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ) ->List[Any]: """simple docstring""" snake_case_ = self.num_choices snake_case_ = TFConvBertForMultipleChoice(config=UpperCAmelCase_ ) snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) ) snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) ) snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) ) snake_case_ = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" snake_case_ = self.num_labels snake_case_ = TFConvBertForTokenClassification(config=UpperCAmelCase_ ) snake_case_ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) ->Union[str, Any]: """simple docstring""" snake_case_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase_ ) snake_case_ = { """input_ids""": input_ids, 
"""attention_mask""": input_mask, """token_type_ids""": token_type_ids, } snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : List[str] ) ->str: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Dict = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) __lowercase: Optional[int] = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) __lowercase: str = False __lowercase: str = False __lowercase: List[str] = False def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = TFConvBertModelTester(self ) snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ) ->Tuple: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Union[str, Any] ) ->str: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->int: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->List[Any]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Any: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = True snake_case_ = True if hasattr(UpperCAmelCase_ , """use_cache""" ): snake_case_ = True snake_case_ = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) snake_case_ = getattr(self.model_tester , """key_length""" , UpperCAmelCase_ ) for model_class in self.all_model_classes: snake_case_ = 
self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = model_class(UpperCAmelCase_ ) snake_case_ = len(model(UpperCAmelCase_ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase_ , saved_model=UpperCAmelCase_ ) snake_case_ = os.path.join(UpperCAmelCase_ , """saved_model""" , """1""" ) snake_case_ = tf.keras.models.load_model(UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ ) if self.is_encoder_decoder: snake_case_ = outputs["""encoder_hidden_states"""] snake_case_ = outputs["""encoder_attentions"""] else: snake_case_ = outputs["""hidden_states"""] snake_case_ = outputs["""attentions"""] self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) snake_case_ = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def lowerCAmelCase ( self : List[str] ) ->Optional[int]: """simple docstring""" snake_case_ = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) self.assertIsNotNone(UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Union[str, Any]: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = True snake_case_ = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length ) snake_case_ = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) snake_case_ = getattr(self.model_tester , """key_length""" , UpperCAmelCase_ ) snake_case_ = getattr(self.model_tester , """key_length""" , UpperCAmelCase_ ) def check_decoder_attentions_output(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = len(UpperCAmelCase_ ) self.assertEqual(out_len % 2 , 0 ) snake_case_ = outputs.decoder_attentions self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(UpperCAmelCase_ : Optional[Any] ): snake_case_ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: snake_case_ = True snake_case_ = False snake_case_ = model_class(UpperCAmelCase_ ) snake_case_ = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) snake_case_ = len(UpperCAmelCase_ ) self.assertEqual(config.output_hidden_states , UpperCAmelCase_ ) check_encoder_attentions_output(UpperCAmelCase_ ) if self.is_encoder_decoder: snake_case_ = model_class(UpperCAmelCase_ ) snake_case_ = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase_ ) check_decoder_attentions_output(UpperCAmelCase_ ) # Check that output attentions can also be changed via the config del 
inputs_dict["output_attentions"] snake_case_ = True snake_case_ = model_class(UpperCAmelCase_ ) snake_case_ = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase_ ) check_encoder_attentions_output(UpperCAmelCase_ ) # Check attention is always last and order is fine snake_case_ = True snake_case_ = True snake_case_ = model_class(UpperCAmelCase_ ) snake_case_ = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase_ ) ) self.assertEqual(model.config.output_hidden_states , UpperCAmelCase_ ) check_encoder_attentions_output(UpperCAmelCase_ ) @require_tf class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : Any ) ->Tuple: """simple docstring""" snake_case_ = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) snake_case_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = [1, 6, 768] self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = """ylacombe/bark-small""" snake_case_ = tempfile.mkdtemp() snake_case_ = """en_speaker_1""" snake_case_ = """This is a test string""" snake_case_ = """speaker_embeddings_path.json""" snake_case_ = """speaker_embeddings""" def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case_ = 35 snake_case_ = 2 snake_case_ = 8 snake_case_ = { """semantic_prompt""": np.ones(UpperCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case_ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string ) snake_case_ = tokenizer( self.input_string , padding="""max_length""" 
, max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
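# Minimal sketch of the processor flow the tests above exercise; assumes the
# ylacombe/bark-small checkpoint is reachable and that `__call__` returns
# PyTorch tensors (its default).
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string")
print(inputs["input_ids"].shape)  # padded to max_length=256, as the last test asserts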
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path __SCREAMING_SNAKE_CASE : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) __SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase] __SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS} __SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str | None: snake_case_ = "" snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 for keychar, cipherchar in zip(cycle(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ): snake_case_ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(_SCREAMING_SNAKE_CASE ) return decoded def _a ( _SCREAMING_SNAKE_CASE ) -> list[str]: snake_case_ = [] for key in product(_SCREAMING_SNAKE_CASE , repeat=3 ): snake_case_ = try_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if encoded is not None: possibles.append(_SCREAMING_SNAKE_CASE ) return possibles def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[str]: return [possible for possible in possibles if common_word in possible.lower()] def _a ( _SCREAMING_SNAKE_CASE = "p059_cipher.txt" ) -> int: snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = Path(_SCREAMING_SNAKE_CASE ).parent.joinpath(_SCREAMING_SNAKE_CASE ).read_text(encoding="""utf-8""" ) snake_case_ = [int(_SCREAMING_SNAKE_CASE ) for number in data.strip().split(""",""" )] snake_case_ = filter_valid_chars(_SCREAMING_SNAKE_CASE ) for common_word in COMMON_WORDS: snake_case_ = filter_common_word(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) == 1: break snake_case_ = possibles[0] return sum(ord(_SCREAMING_SNAKE_CASE ) for char in decoded_text ) if __name__ == "__main__": print(f"""{solution() = }""")
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
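# Quick sketch of driving the parser outside the test harness, using the
# `BasicExample` dataclass defined (obfuscated as `__A`) at the top of this
# file: fields foo: int, bar: float, baz: str, flag: bool.
from transformers import HfArgumentParser

parser = HfArgumentParser(BasicExample)
(example,) = parser.parse_args_into_dataclasses(
    ["--foo", "12", "--bar", "3.14", "--baz", "quux", "--flag", "True"]
)
print(example)  # BasicExample(foo=12, bar=3.14, baz='quux', flag=True)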
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder __SCREAMING_SNAKE_CASE : List[str] = '__DUMMY_TRANSFORMERS_USER__' __SCREAMING_SNAKE_CASE : Dict = 'Dummy User' __SCREAMING_SNAKE_CASE : Tuple = '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' __SCREAMING_SNAKE_CASE : Optional[Any] = 'https://hub-ci.huggingface.co' __SCREAMING_SNAKE_CASE : List[str] = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' __SCREAMING_SNAKE_CASE : Optional[int] = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' __SCREAMING_SNAKE_CASE : Union[str, Any] = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _SCREAMING_SNAKE_CASE ) @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE ) -> str: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _SCREAMING_SNAKE_CASE ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _SCREAMING_SNAKE_CASE ) @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _SCREAMING_SNAKE_CASE ) @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: HfFolder.save_token(_SCREAMING_SNAKE_CASE ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def _a ( ) -> List[str]: return HfApi(endpoint=_SCREAMING_SNAKE_CASE ) @pytest.fixture(scope="""session""" ) def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: snake_case_ = HfFolder.get_token() HfFolder.save_token(_SCREAMING_SNAKE_CASE ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_SCREAMING_SNAKE_CASE ) @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: def _cleanup_repo(_SCREAMING_SNAKE_CASE ): hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE ) -> str: @contextmanager def _temporary_repo(_SCREAMING_SNAKE_CASE ): try: yield repo_id finally: cleanup_repo(_SCREAMING_SNAKE_CASE ) return _temporary_repo @pytest.fixture(scope="""session""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = f"""repo_txt_data-{int(time.time() * 1_0E3 )}""" snake_case_ = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , private=_SCREAMING_SNAKE_CASE ) hf_api.upload_file( token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo="""data/text_data.txt""" , repo_id=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = f"""repo_zipped_txt_data-{int(time.time() * 1_0E3 )}""" snake_case_ = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , 
private=_SCREAMING_SNAKE_CASE ) hf_api.upload_file( token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo="""data.zip""" , repo_id=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = f"""repo_zipped_img_data-{int(time.time() * 1_0E3 )}""" snake_case_ = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , private=_SCREAMING_SNAKE_CASE ) hf_api.upload_file( token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo="""data.zip""" , repo_id=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: return hf_private_dataset_repo_zipped_img_data_
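# Sketch of a test consuming the fixtures above. The obfuscated `_a`
# definitions hide the fixture names; upstream the wrapper returning
# `hf_private_dataset_repo_txt_data_` is called `hf_private_dataset_repo_txt_data`,
# which is assumed here.
def test_private_txt_repo_name(hf_private_dataset_repo_txt_data):
    assert hf_private_dataset_repo_txt_data.startswith(CI_HUB_USER + "/")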
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
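# Hedged sketch of driving the quantization entry point above; upstream it is
# exposed as `accelerate.utils.load_and_quantize_model` (obfuscated to `_a`
# here). The checkpoint path is a placeholder.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="/path/to/checkpoint",  # placeholder
    device_map="auto",
)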
"""simple docstring""" import os from collections.abc import Iterator def _a ( _SCREAMING_SNAKE_CASE = "." ) -> Iterator[str]: for dir_path, dir_names, filenames in os.walk(_SCREAMING_SNAKE_CASE ): snake_case_ = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(_SCREAMING_SNAKE_CASE )[1] in (".py", ".ipynb"): yield os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).lstrip("""./""" ) def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: return f"""{i * " "}*""" if i else "\n##" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(_SCREAMING_SNAKE_CASE ) or old_parts[i] != new_part) and new_part: print(f"""{md_prefix(_SCREAMING_SNAKE_CASE )} {new_part.replace("_" , " " ).title()}""" ) return new_path def _a ( _SCREAMING_SNAKE_CASE = "." ) -> None: snake_case_ = """""" for filepath in sorted(good_file_paths(_SCREAMING_SNAKE_CASE ) ): snake_case_ , snake_case_ = os.path.split(_SCREAMING_SNAKE_CASE ) if filepath != old_path: snake_case_ = print_path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = (filepath.count(os.sep ) + 1) if filepath else 0 snake_case_ = f"""{filepath}/{filename}""".replace(""" """ , """%20""" ) snake_case_ = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0] print(f"""{md_prefix(_SCREAMING_SNAKE_CASE )} [{filename}]({url})""" ) if __name__ == "__main__": print_directory_md('.')
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
"""simple docstring""" import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __SCREAMING_SNAKE_CASE : Tuple = datasets.logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Optional[Any] = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n' __SCREAMING_SNAKE_CASE : str = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n' __SCREAMING_SNAKE_CASE : Optional[int] = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n' __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip', 'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip', 'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip', 'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip', 'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip', 'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip', 'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip', 'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip', 'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip', 'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. 
""" """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" ) snake_case_ = """bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: snake_case_ = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: snake_case_ = self.config_name.upper() else: raise KeyError( F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" ) # download the model checkpoint specified by self.config_name and set up the scorer snake_case_ = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) snake_case_ = score.BleurtScorer(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ) ->int: """simple docstring""" snake_case_ = self.scorer.score(references=UpperCAmelCase_ , candidates=UpperCAmelCase_ ) return {"scores": scores}
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = 
os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
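The sentinel handling above maps <extra_id_N> to ids counted down from the top of the vocabulary. A quick sketch with the upstream T5 tokenizer (assumes transformers, sentencepiece, and network access; t5-small has 32000 sentencepiece ids plus 100 extra ids):

from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")     # vocab_size == 32100
print(tok.convert_tokens_to_ids("<extra_id_0>"))  # 32099, i.e. vocab_size - 0 - 1
print(tok.convert_ids_to_tokens(32098))           # <extra_id_1>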
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py __SCREAMING_SNAKE_CASE : Tuple = 'src/diffusers' # Matches is_xxx_available() __SCREAMING_SNAKE_CASE : Any = re.compile(R'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla __SCREAMING_SNAKE_CASE : str = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') __SCREAMING_SNAKE_CASE : Optional[int] = '\n{0} = None\n' __SCREAMING_SNAKE_CASE : Optional[int] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' __SCREAMING_SNAKE_CASE : Optional[int] = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = _re_backend.findall(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) == 0: return None return "_and_".join(_SCREAMING_SNAKE_CASE ) def _a ( ) -> int: with open(os.path.join(_SCREAMING_SNAKE_CASE , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ = f.readlines() # Get to the point we do the actual imports for type checking snake_case_ = 0 snake_case_ = {} # Go through the end of the file while line_index < len(_SCREAMING_SNAKE_CASE ): # If the line contains is_backend_available, we grab all objects associated with the `else` block snake_case_ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 snake_case_ = [] # Until we unindent, add backend objects to the list while line_index < len(_SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1: snake_case_ = lines[line_index] snake_case_ = _re_single_line_import.search(_SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(_SCREAMING_SNAKE_CASE ) > 0: snake_case_ = objects else: line_index += 1 return backend_specific_objects def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: if name.isupper(): return DUMMY_CONSTANT.format(_SCREAMING_SNAKE_CASE ) elif name.islower(): return DUMMY_FUNCTION.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: return DUMMY_CLASS.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE=None ) -> Dict: if backend_specific_objects is None: snake_case_ = read_init() # For special correspondence backend to module name as used in the function requires_modulename snake_case_ = {} for backend, objects in backend_specific_objects.items(): snake_case_ = """[""" + """, """.join(f"""\"{b}\"""" for b in backend.split("""_and_""" ) ) + """]""" snake_case_ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for o in objects] ) snake_case_ = dummy_file return dummy_files def _a ( _SCREAMING_SNAKE_CASE=False ) -> List[str]: snake_case_ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py snake_case_ = 
{"""torch""": """pt"""} # Locate actual dummy modules and read their content. snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """utils""" ) snake_case_ = { backend: os.path.join(_SCREAMING_SNAKE_CASE , f"""dummy_{short_names.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}_objects.py""" ) for backend in dummy_files.keys() } snake_case_ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(_SCREAMING_SNAKE_CASE ): with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ = f.read() else: snake_case_ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"""Updating diffusers.utils.dummy_{short_names.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}_objects.py as the main """ """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ f"""diffusers.utils.dummy_{short_names.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` """ """to fix this.""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __SCREAMING_SNAKE_CASE : str = parser.parse_args() check_dummies(args.fix_and_overwrite)
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int: snake_case_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) snake_case_ = str(bin(_SCREAMING_SNAKE_CASE ) ) binary_number += "0" * shift_amount return binary_number def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) snake_case_ = str(bin(_SCREAMING_SNAKE_CASE ) )[2:] if shift_amount >= len(_SCREAMING_SNAKE_CASE ): return "0b0" snake_case_ = binary_number[: len(_SCREAMING_SNAKE_CASE ) - shift_amount] return "0b" + shifted_binary_number def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: if number >= 0: # Get binary representation of positive number snake_case_ = """0""" + str(bin(_SCREAMING_SNAKE_CASE ) ).strip("""-""" )[2:] else: # Get binary (2's complement) representation of negative number snake_case_ = len(bin(_SCREAMING_SNAKE_CASE )[3:] ) # Find 2's complement of number snake_case_ = bin(abs(_SCREAMING_SNAKE_CASE ) - (1 << binary_number_length) )[3:] snake_case_ = ( """1""" + """0""" * (binary_number_length - len(_SCREAMING_SNAKE_CASE )) + binary_number ) if shift_amount >= len(_SCREAMING_SNAKE_CASE ): return "0b" + binary_number[0] * len(_SCREAMING_SNAKE_CASE ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(_SCREAMING_SNAKE_CASE ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
"""simple docstring""" from PIL import Image def _a ( _SCREAMING_SNAKE_CASE ) -> Image: snake_case_ , snake_case_ = image.size snake_case_ = 0 snake_case_ = image.load() for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ = pixels[j, i] mean += pixel mean //= width * height for j in range(_SCREAMING_SNAKE_CASE ): for i in range(_SCREAMING_SNAKE_CASE ): snake_case_ = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = mean_threshold(Image.open('path_to_image').convert('L')) image.save('output_image_path')
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = 
num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
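decode_fn above parses fixed-length integer features, so each record must be written with matching keys and padded lengths. A sketch of producing one compatible shard (hypothetical helper and filename, chosen to satisfy the -<shard>-<count>.tfrecord pattern that count_samples expects):

import tensorflow as tf


def serialize_record(input_ids, attention_mask):
    # Both lists must already be padded to the max_length used at training time.
    feature = {
        "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=input_ids)),
        "attention_mask": tf.train.Feature(int64_list=tf.train.Int64List(value=attention_mask)),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()


with tf.io.TFRecordWriter("train-0-1.tfrecord") as writer:  # shard 0, 1 sample
    writer.write(serialize_record([101] + [0] * 511, [1] + [0] * 511))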
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" , [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] , ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) snake_case_ = DatasetInfosDict.from_directory(_SCREAMING_SNAKE_CASE ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""" , [ DatasetInfo(), DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ), ] , ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = str(_SCREAMING_SNAKE_CASE ) dataset_info.write_to_directory(_SCREAMING_SNAKE_CASE ) snake_case_ = DatasetInfo.from_directory(_SCREAMING_SNAKE_CASE ) assert dataset_info == reloaded assert os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """dataset_info.json""" ) ) def _a ( ) -> Union[str, Any]: snake_case_ = DatasetInfo( description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1_337 , post_processing_size=442 , dataset_size=1_234 , size_in_bytes=1_337 + 442 + 1_234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(_SCREAMING_SNAKE_CASE ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(_SCREAMING_SNAKE_CASE ) snake_case_ = yaml.safe_load(_SCREAMING_SNAKE_CASE ) assert dataset_info_yaml_dict == reloaded def _a ( ) -> Optional[Any]: snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" , [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42 ), """v2""": DatasetInfo(dataset_size=1_337 ), } ), ] , ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = 
str(_SCREAMING_SNAKE_CASE ) dataset_infos_dict.write_to_directory(_SCREAMING_SNAKE_CASE ) snake_case_ = DatasetInfosDict.from_directory(_SCREAMING_SNAKE_CASE ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """README.md""" ) )
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float: if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __A : '''simple docstring''' @staticmethod def lowerCAmelCase ( *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Union[str, Any]: """simple docstring""" pass @is_pipeline_test @require_vision @require_timm @require_torch class __A (unittest.TestCase): '''simple docstring''' __lowercase: List[str] = MODEL_FOR_OBJECT_DETECTION_MAPPING def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ) ->List[Any]: """simple docstring""" snake_case_ = ObjectDetectionPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ) ->Union[str, Any]: """simple docstring""" snake_case_ = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 ) self.assertGreater(len(UpperCAmelCase_ ) , 0 ) for detected_object in outputs: self.assertEqual( UpperCAmelCase_ , { """score""": ANY(UpperCAmelCase_ ), """label""": ANY(UpperCAmelCase_ ), """box""": {"""xmin""": ANY(UpperCAmelCase_ ), """ymin""": ANY(UpperCAmelCase_ ), """xmax""": ANY(UpperCAmelCase_ ), """ymax""": ANY(UpperCAmelCase_ )}, } , ) import datasets snake_case_ = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) snake_case_ = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] snake_case_ = object_detector(UpperCAmelCase_ , threshold=0.0 ) self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) ) for outputs in batch_outputs: self.assertGreater(len(UpperCAmelCase_ ) , 0 ) for detected_object in outputs: self.assertEqual( UpperCAmelCase_ , { """score""": ANY(UpperCAmelCase_ ), """label""": ANY(UpperCAmelCase_ ), """box""": {"""xmin""": ANY(UpperCAmelCase_ ), """ymin""": ANY(UpperCAmelCase_ ), """xmax""": ANY(UpperCAmelCase_ ), """ymax""": ANY(UpperCAmelCase_ )}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" pass @require_torch def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = """hf-internal-testing/tiny-detr-mobilenetsv3""" snake_case_ = AutoModelForObjectDetection.from_pretrained(UpperCAmelCase_ ) snake_case_ = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ ) snake_case_ = ObjectDetectionPipeline(model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ ) snake_case_ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 
120, """xmax""": 480, """ymax""": 359}}, ] , ) snake_case_ = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], ] , ) @require_torch @slow def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = """facebook/detr-resnet-50""" snake_case_ = AutoModelForObjectDetection.from_pretrained(UpperCAmelCase_ ) snake_case_ = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ ) snake_case_ = ObjectDetectionPipeline(model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ ) snake_case_ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) snake_case_ = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] 
, ) @require_torch @slow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ = """facebook/detr-resnet-50""" snake_case_ = pipeline("""object-detection""" , model=UpperCAmelCase_ ) snake_case_ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) snake_case_ = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def lowerCAmelCase ( self : Tuple ) ->Optional[Any]: """simple docstring""" snake_case_ = 0.9_985 snake_case_ = """facebook/detr-resnet-50""" snake_case_ = pipeline("""object-detection""" , model=UpperCAmelCase_ ) snake_case_ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=UpperCAmelCase_ ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) @require_torch @require_pytesseract @slow def lowerCAmelCase ( self : List[str] ) ->List[Any]: """simple docstring""" snake_case_ = """Narsil/layoutlmv3-finetuned-funsd""" snake_case_ = 0.9_993 snake_case_ = pipeline("""object-detection""" , model=UpperCAmelCase_ , threshold=UpperCAmelCase_ ) snake_case_ = 
object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, ] , )
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> bool: if num < 0: return False snake_case_ = num snake_case_ = 0 while num > 0: snake_case_ = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __SCREAMING_SNAKE_CASE : Any = trt.Logger(trt.Logger.WARNING) __SCREAMING_SNAKE_CASE : Any = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __SCREAMING_SNAKE_CASE : Optional[int] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() if args.tokenizer_name: __SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size __SCREAMING_SNAKE_CASE : Dict = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __SCREAMING_SNAKE_CASE : Optional[int] = True __SCREAMING_SNAKE_CASE : Any = 'temp_engine/bert-fp32.engine' if args.fpaa: __SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine' if args.inta: __SCREAMING_SNAKE_CASE : Optional[Any] = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __SCREAMING_SNAKE_CASE : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __SCREAMING_SNAKE_CASE : Tuple = [network.get_input(i) for i in range(network.num_inputs)] __SCREAMING_SNAKE_CASE : List[str] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __SCREAMING_SNAKE_CASE : List[Any] = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __SCREAMING_SNAKE_CASE : List[str] = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __SCREAMING_SNAKE_CASE : List[str] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: snake_case_ = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) snake_case_ = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) snake_case_ = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _SCREAMING_SNAKE_CASE ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _SCREAMING_SNAKE_CASE ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _SCREAMING_SNAKE_CASE ) # start time snake_case_ = time.time() # Run inference context.execute_async( bindings=[int(_SCREAMING_SNAKE_CASE ) for d_inp in d_inputs] + [int(_SCREAMING_SNAKE_CASE ), int(_SCREAMING_SNAKE_CASE )] , 
stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) cuda.memcpy_dtoh_async(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Synchronize the stream and take time stream.synchronize() # end time snake_case_ = time.time() snake_case_ = end_time - start_time snake_case_ = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __SCREAMING_SNAKE_CASE : Any = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __SCREAMING_SNAKE_CASE : Tuple = raw_datasets['validation'].column_names __SCREAMING_SNAKE_CASE : int = 'question' if 'question' in column_names else column_names[0] __SCREAMING_SNAKE_CASE : int = 'context' if 'context' in column_names else column_names[1] __SCREAMING_SNAKE_CASE : Optional[Any] = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __SCREAMING_SNAKE_CASE : Any = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __SCREAMING_SNAKE_CASE : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length) def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). 
So we remove that # left whitespace snake_case_ = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. snake_case_ = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=_SCREAMING_SNAKE_CASE , stride=args.doc_stride , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. snake_case_ = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. snake_case_ = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). snake_case_ = tokenized_examples.sequence_ids(_SCREAMING_SNAKE_CASE ) snake_case_ = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. snake_case_ = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. snake_case_ = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'] # Validation Feature Creation __SCREAMING_SNAKE_CASE : str = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __SCREAMING_SNAKE_CASE : List[str] = default_data_collator __SCREAMING_SNAKE_CASE : int = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __SCREAMING_SNAKE_CASE : List[str] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="eval" ) -> Optional[int]: # Post-processing: we match the start logits and end logits to answers in the original context. snake_case_ = postprocess_qa_predictions( examples=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_SCREAMING_SNAKE_CASE , ) # Format the result to the format the metric expects. 
    if args.version_2_with_negative:
        snake_case_ = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        snake_case_ = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]

    snake_case_ = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=_SCREAMING_SNAKE_CASE , label_ids=_SCREAMING_SNAKE_CASE )


__SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')

# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def _a ( _SCREAMING_SNAKE_CASE ) -> int:
        return trt.volume(engine.get_binding_shape(_SCREAMING_SNAKE_CASE ) ) * engine.get_binding_dtype(_SCREAMING_SNAKE_CASE ).itemsize

    # Allocate device memory for inputs and outputs.
    __SCREAMING_SNAKE_CASE : str = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    __SCREAMING_SNAKE_CASE : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    __SCREAMING_SNAKE_CASE : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    __SCREAMING_SNAKE_CASE : List[str] = cuda.mem_alloc(h_outputa.nbytes)
    __SCREAMING_SNAKE_CASE : List[str] = cuda.mem_alloc(h_outputa.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    __SCREAMING_SNAKE_CASE : List[Any] = cuda.Stream()

    # Evaluation
    logger.info('***** Running Evaluation *****')
    logger.info(f""" Num examples = {len(eval_dataset)}""")
    logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")

    __SCREAMING_SNAKE_CASE : List[Any] = 0.0
    __SCREAMING_SNAKE_CASE : str = 0
    __SCREAMING_SNAKE_CASE : Tuple = timeit.default_timer()

    __SCREAMING_SNAKE_CASE : int = None
    for step, batch in enumerate(eval_dataloader):
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1

        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = outputs
        __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(start_logits)
        __SCREAMING_SNAKE_CASE : int = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        __SCREAMING_SNAKE_CASE : List[str] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        __SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        __SCREAMING_SNAKE_CASE : str = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        __SCREAMING_SNAKE_CASE : Tuple = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        __SCREAMING_SNAKE_CASE : Optional[Any] = nested_truncate(all_preds, len(eval_dataset))

    __SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))

    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
    logger.info('Total Number of Inferences = %d', niter)

    __SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds)

    __SCREAMING_SNAKE_CASE : List[str] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"""Evaluation metrics: {eval_metric}""")
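# --- Added sketch (not part of the original script) ----------------------------
# A minimal illustration of the async pattern `model_infer` above is built on:
# pinned host buffers, device buffers, and a single CUDA stream, so the
# host-to-device copies, the TensorRT execution, and the device-to-host copies
# are queued together and synchronized once. The names `context`, `d_inputs`,
# `d_output`, `h_output` and the binding layout are assumptions for this sketch.
import numpy as np
import pycuda.autoinit  # noqa: F401  (creates a CUDA context for a standalone run)
import pycuda.driver as cuda


def async_infer_sketch(context, d_inputs, d_output, h_output, input_arrays):
    stream = cuda.Stream()
    # Queue host-to-device copies for every input binding.
    for d_in, h_in in zip(d_inputs, input_arrays):
        cuda.memcpy_htod_async(d_in, np.ascontiguousarray(h_in), stream)
    # Queue the engine execution on the same stream (TensorRT >= 7 API).
    context.execute_async_v2(bindings=[int(b) for b in (*d_inputs, d_output)], stream_handle=stream.handle)
    # Queue the device-to-host copy of the logits, then wait for the whole chain.
    cuda.memcpy_dtoh_async(h_output, d_output, stream)
    stream.synchronize()
    return h_output
# --------------------------------------------------------------------------------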
347
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = SpeechTaTokenizer __lowercase: int = False __lowercase: List[str] = True def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ ) snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) snake_case_ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = """this is a test""" snake_case_ = """this is a test""" return input_text, output_text def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = """<pad>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCAmelCase_ ) , 81 ) def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size + 
len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off snake_case_ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
347
1
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 50 ) -> int: snake_case_ = [1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(f"""{solution() = }""")
347
"""simple docstring""" import datasets __SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' __SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' __SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int: """simple docstring""" return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
347
1
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file __SCREAMING_SNAKE_CASE : Optional[Any] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.' def _a ( _SCREAMING_SNAKE_CASE=None ) -> Dict: if subparsers is not None: snake_case_ = subparsers.add_parser("""tpu-config""" , description=_description ) else: snake_case_ = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description ) # Core arguments snake_case_ = parser.add_argument_group( """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" ) config_args.add_argument( """--config_file""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""Path to the config file to use for accelerate.""" , ) config_args.add_argument( """--tpu_name""" , default=_SCREAMING_SNAKE_CASE , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , ) config_args.add_argument( """--tpu_zone""" , default=_SCREAMING_SNAKE_CASE , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , ) snake_case_ = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" ) pod_args.add_argument( """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , ) pod_args.add_argument( """--command_file""" , default=_SCREAMING_SNAKE_CASE , help="""The path to the file containing the commands to run on the pod on startup.""" , ) pod_args.add_argument( """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , ) pod_args.add_argument( """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , ) pod_args.add_argument( """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , ) pod_args.add_argument( """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" ) if subparsers is not None: parser.set_defaults(func=_SCREAMING_SNAKE_CASE ) return parser def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ): snake_case_ = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: snake_case_ = defaults.command_file if not args.command and defaults.commands is not None: snake_case_ = defaults.commands if not args.tpu_name: snake_case_ = defaults.tpu_name if not args.tpu_zone: snake_case_ = defaults.tpu_zone if args.accelerate_version == "dev": snake_case_ = """git+https://github.com/huggingface/accelerate.git""" elif args.accelerate_version == "latest": snake_case_ = """accelerate -U""" elif isinstance(parse(args.accelerate_version ) , _SCREAMING_SNAKE_CASE ): snake_case_ = f"""accelerate=={args.accelerate_version}""" if not args.command_file and not args.command: raise ValueError("""You must specify either a command file or a command to run on the pod.""" ) if args.command_file: with open(args.command_file , """r""" ) as f: snake_case_ = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , _SCREAMING_SNAKE_CASE ): snake_case_ = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate snake_case_ = ["""cd /usr/share"""] if args.install_accelerate: new_cmd += [f"""pip install {args.accelerate_version}"""] new_cmd += args.command snake_case_ = """; """.join(_SCREAMING_SNAKE_CASE ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess snake_case_ = ["""gcloud"""] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f"""Running {" ".join(_SCREAMING_SNAKE_CASE )}""" ) return subprocess.run(_SCREAMING_SNAKE_CASE ) print("""Successfully setup pod.""" ) def _a ( ) -> List[Any]: snake_case_ = tpu_command_parser() snake_case_ = parser.parse_args() tpu_command_launcher(_SCREAMING_SNAKE_CASE )
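# --- Added illustration (hypothetical values, not part of the original file) -----
# With --tpu_name my-tpu --tpu_zone us-central1-b --install_accelerate and a
# one-line --command, the launcher above would assemble roughly this invocation:
#
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-b \
#       --command "cd /usr/share; pip install accelerate -U; python train.py" \
#       --worker all
#
# i.e. all startup commands are joined with '; ' and run on every pod worker.
# --------------------------------------------------------------------------------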
347
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
347
1
"""simple docstring""" from copy import deepcopy class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : list[int] | None = None , UpperCAmelCase_ : int | None = None ) ->None: """simple docstring""" if arr is None and size is not None: snake_case_ = size snake_case_ = [0] * size elif arr is not None: self.init(UpperCAmelCase_ ) else: raise ValueError("""Either arr or size must be specified""" ) def lowerCAmelCase ( self : str , UpperCAmelCase_ : list[int] ) ->None: """simple docstring""" snake_case_ = len(UpperCAmelCase_ ) snake_case_ = deepcopy(UpperCAmelCase_ ) for i in range(1 , self.size ): snake_case_ = self.next_(UpperCAmelCase_ ) if j < self.size: self.tree[j] += self.tree[i] def lowerCAmelCase ( self : Dict ) ->list[int]: """simple docstring""" snake_case_ = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): snake_case_ = self.next_(UpperCAmelCase_ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : int ) ->int: """simple docstring""" return index + (index & (-index)) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : int ) ->int: """simple docstring""" return index - (index & (-index)) def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) ->None: """simple docstring""" if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value snake_case_ = self.next_(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) ->None: """simple docstring""" self.add(UpperCAmelCase_ , value - self.get(UpperCAmelCase_ ) ) def lowerCAmelCase ( self : str , UpperCAmelCase_ : int ) ->int: """simple docstring""" if right == 0: return 0 snake_case_ = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] snake_case_ = self.prev(UpperCAmelCase_ ) return result def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) ->int: """simple docstring""" return self.prefix(UpperCAmelCase_ ) - self.prefix(UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : int ) ->int: """simple docstring""" return self.query(UpperCAmelCase_ , index + 1 ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int ) ->int: """simple docstring""" value -= self.tree[0] if value < 0: return -1 snake_case_ = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 snake_case_ = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
347
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None: """simple docstring""" warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
347
1
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __SCREAMING_SNAKE_CASE : Optional[int] = (720, 1_280) # Height, Width __SCREAMING_SNAKE_CASE : str = (0.4, 0.6) # if height or width lower than this scale, drop it. __SCREAMING_SNAKE_CASE : Tuple = 1 / 100 __SCREAMING_SNAKE_CASE : List[Any] = '' __SCREAMING_SNAKE_CASE : int = '' __SCREAMING_SNAKE_CASE : int = '' __SCREAMING_SNAKE_CASE : Dict = 250 def _a ( ) -> None: snake_case_ , snake_case_ = get_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for index in range(_SCREAMING_SNAKE_CASE ): snake_case_ = random.sample(range(len(_SCREAMING_SNAKE_CASE ) ) , 4 ) snake_case_ , snake_case_ , snake_case_ = update_image_and_anno( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , filter_scale=_SCREAMING_SNAKE_CASE , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' snake_case_ = random_chars(32 ) snake_case_ = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] snake_case_ = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(f"""{file_root}.jpg""" , _SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) snake_case_ = [] for anno in new_annos: snake_case_ = anno[3] - anno[1] snake_case_ = anno[4] - anno[2] snake_case_ = anno[1] + width / 2 snake_case_ = anno[2] + height / 2 snake_case_ = f"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(_SCREAMING_SNAKE_CASE ) with open(f"""{file_root}.txt""" , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> tuple[list, list]: snake_case_ = [] snake_case_ = [] for label_file in glob.glob(os.path.join(_SCREAMING_SNAKE_CASE , """*.txt""" ) ): snake_case_ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(_SCREAMING_SNAKE_CASE ) as in_file: snake_case_ = in_file.readlines() snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , f"""{label_name}.jpg""" ) snake_case_ = [] for obj_list in obj_lists: snake_case_ = obj_list.rstrip("""\n""" ).split(""" """ ) snake_case_ = float(obj[1] ) - float(obj[3] ) / 2 snake_case_ = float(obj[2] ) - float(obj[4] ) / 2 snake_case_ = float(obj[1] ) + float(obj[3] ) / 2 snake_case_ = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(_SCREAMING_SNAKE_CASE ) labels.append(_SCREAMING_SNAKE_CASE ) return img_paths, labels def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.0 , ) -> tuple[list, list, str]: snake_case_ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) snake_case_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) snake_case_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) snake_case_ = int(scale_x * output_size[1] ) snake_case_ = int(scale_y * output_size[0] ) snake_case_ = [] snake_case_ = [] for i, index in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = all_img_list[index] path_list.append(_SCREAMING_SNAKE_CASE ) snake_case_ = all_annos[index] snake_case_ = cva.imread(_SCREAMING_SNAKE_CASE ) if i == 0: # top-left snake_case_ = cva.resize(_SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) ) snake_case_ = img for bbox in img_annos: snake_case_ = 
bbox[1] * scale_x snake_case_ = bbox[2] * scale_y snake_case_ = bbox[3] * scale_x snake_case_ = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right snake_case_ = cva.resize(_SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) ) snake_case_ = img for bbox in img_annos: snake_case_ = scale_x + bbox[1] * (1 - scale_x) snake_case_ = bbox[2] * scale_y snake_case_ = scale_x + bbox[3] * (1 - scale_x) snake_case_ = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left snake_case_ = cva.resize(_SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) ) snake_case_ = img for bbox in img_annos: snake_case_ = bbox[1] * scale_x snake_case_ = scale_y + bbox[2] * (1 - scale_y) snake_case_ = bbox[3] * scale_x snake_case_ = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right snake_case_ = cva.resize( _SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) snake_case_ = img for bbox in img_annos: snake_case_ = scale_x + bbox[1] * (1 - scale_x) snake_case_ = scale_y + bbox[2] * (1 - scale_y) snake_case_ = scale_x + bbox[3] * (1 - scale_x) snake_case_ = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: snake_case_ = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def _a ( _SCREAMING_SNAKE_CASE ) -> str: assert number_char > 1, "The number of character should greater than 1" snake_case_ = ascii_lowercase + digits return "".join(random.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": main() print('DONE ✅')
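# --- Added sketch (an illustration, not part of the original script) -------------
# The two bbox conversions the mosaic math above relies on: YOLO labels store
# (class, x_center, y_center, width, height) in relative units, while the
# scaling and shifting per quadrant works on corner coordinates.
def _yolo_to_corners(obj):
    cls, xc, yc, w, h = obj
    return [cls, xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2]


def _corners_to_yolo(anno):
    cls, xmin, ymin, xmax, ymax = anno
    w, h = xmax - xmin, ymax - ymin
    return [cls, xmin + w / 2, ymin + h / 2, w, h]


# Round trip recovers the original box (up to float rounding).
_orig = [0, 0.5, 0.5, 0.2, 0.4]
_back = _corners_to_yolo(_yolo_to_corners(_orig))
assert all(abs(a - b) < 1e-12 for a, b in zip(_orig, _back))
# --------------------------------------------------------------------------------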
347
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , 
outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
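# --- Added sketch (illustrative shapes, not the real checkpoint) -----------------
# The core move in read_in_q_k_v above: timm stores attention as one fused qkv
# matrix of shape (3 * hidden, hidden); the HF model expects separate q, k, v
# matrices of shape (hidden, hidden), sliced out in that order.
import torch

_hidden = 4  # toy size
_qkv = torch.randn(3 * _hidden, _hidden)
_q = _qkv[:_hidden, :]
_k = _qkv[_hidden: 2 * _hidden, :]
_v = _qkv[-_hidden:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _qkv)
# --------------------------------------------------------------------------------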
347
1
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) snake_case_ = (boundary[1] - boundary[0]) / steps snake_case_ = boundary[0] snake_case_ = boundary[1] snake_case_ = make_points(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = 0.0 y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) for i in x_i: # print(i) y += h * f(_SCREAMING_SNAKE_CASE ) y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) return y def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: snake_case_ = a + h while x < (b - h): yield x snake_case_ = x + h def _a ( _SCREAMING_SNAKE_CASE ) -> str: # enter your function here snake_case_ = (x - 0) * (x - 0) return y def _a ( ) -> Dict: snake_case_ = 0.0 # Lower bound of integration snake_case_ = 1.0 # Upper bound of integration snake_case_ = 10.0 # define number of steps or resolution snake_case_ = [a, b] # define boundary of integration snake_case_ = method_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f"""y = {y}""" ) if __name__ == "__main__": main()
347
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = True __lowercase: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = 50_000 snake_case_ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json', } class __A (snake_case__): '''simple docstring''' __lowercase: Tuple = """t5""" __lowercase: Optional[Any] = ["""past_key_values"""] __lowercase: Optional[int] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Optional[int] , UpperCAmelCase_ : Any=32_128 , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : Dict=64 , UpperCAmelCase_ : int=2_048 , UpperCAmelCase_ : Optional[Any]=6 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : str=8 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : Any=128 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=1E-6 , UpperCAmelCase_ : List[Any]=1.0 , UpperCAmelCase_ : Any="relu" , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : List[Any]=1 , **UpperCAmelCase_ : Optional[int] , ) ->Optional[int]: """simple docstring""" snake_case_ = vocab_size snake_case_ = d_model snake_case_ = d_kv snake_case_ = d_ff snake_case_ = num_layers snake_case_ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry snake_case_ = num_heads snake_case_ = relative_attention_num_buckets snake_case_ = relative_attention_max_distance snake_case_ = dropout_rate snake_case_ = layer_norm_epsilon snake_case_ = initializer_factor snake_case_ = feed_forward_proj snake_case_ = use_cache snake_case_ = self.feed_forward_proj.split("""-""" ) snake_case_ = act_info[-1] snake_case_ = act_info[0] == """gated""" if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
""" """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": snake_case_ = """gelu_new""" super().__init__( pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , ) class __A (snake_case__): '''simple docstring''' @property def lowerCAmelCase ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" snake_case_ = { """input_ids""": {0: """batch""", 1: """encoder_sequence"""}, """attention_mask""": {0: """batch""", 1: """encoder_sequence"""}, } if self.use_past: snake_case_ = """past_encoder_sequence + sequence""" snake_case_ = {0: """batch"""} snake_case_ = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: snake_case_ = {0: """batch""", 1: """decoder_sequence"""} snake_case_ = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase_ , direction="""inputs""" ) return common_inputs @property def lowerCAmelCase ( self : List[Any] ) ->int: """simple docstring""" return 13
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern snake_case_ , snake_case_ = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case_ = failure[j - 1] continue i += 1 return False def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = [0] snake_case_ = 0 snake_case_ = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case_ = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) __SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12' __SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __SCREAMING_SNAKE_CASE : int = 'ABABX' __SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __SCREAMING_SNAKE_CASE : Any = 'AAAB' __SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy' __SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __SCREAMING_SNAKE_CASE : Any = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } snake_case_ = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 128, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 142, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : Tuple ) ->Any: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(UpperCAmelCase_ ) , x.transpose() ) ) snake_case_ = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(UpperCAmelCase_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCAmelCase ( self : Any ) ->List[Any]: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) snake_case_ = torch.tensor(UpperCAmelCase_ ) self.assertTrue(np.allclose(transpose(UpperCAmelCase_ ) , transpose(UpperCAmelCase_ ).numpy() ) ) snake_case_ = np.random.randn(3 , 4 , 5 ) snake_case_ = torch.tensor(UpperCAmelCase_ ) self.assertTrue(np.allclose(transpose(UpperCAmelCase_ , axes=(1, 2, 0) ) , transpose(UpperCAmelCase_ , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) snake_case_ = tf.constant(UpperCAmelCase_ ) self.assertTrue(np.allclose(transpose(UpperCAmelCase_ ) , transpose(UpperCAmelCase_ ).numpy() ) ) snake_case_ = np.random.randn(3 , 4 , 5 ) snake_case_ = tf.constant(UpperCAmelCase_ ) self.assertTrue(np.allclose(transpose(UpperCAmelCase_ , axes=(1, 2, 0) ) , transpose(UpperCAmelCase_ , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) snake_case_ = jnp.array(UpperCAmelCase_ ) self.assertTrue(np.allclose(transpose(UpperCAmelCase_ ) , np.asarray(transpose(UpperCAmelCase_ ) ) ) ) snake_case_ = np.random.randn(3 , 4 , 5 ) snake_case_ = jnp.array(UpperCAmelCase_ ) self.assertTrue(np.allclose(transpose(UpperCAmelCase_ , axes=(1, 2, 0) ) , np.asarray(transpose(UpperCAmelCase_ , axes=(1, 2, 0) 
) ) ) ) def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (4, 3) ) , np.reshape(UpperCAmelCase_ , (4, 3) ) ) ) snake_case_ = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (12, 5) ) , np.reshape(UpperCAmelCase_ , (12, 5) ) ) ) @require_torch def lowerCAmelCase ( self : Optional[Any] ) ->int: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) snake_case_ = torch.tensor(UpperCAmelCase_ ) self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (4, 3) ) , reshape(UpperCAmelCase_ , (4, 3) ).numpy() ) ) snake_case_ = np.random.randn(3 , 4 , 5 ) snake_case_ = torch.tensor(UpperCAmelCase_ ) self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (12, 5) ) , reshape(UpperCAmelCase_ , (12, 5) ).numpy() ) ) @require_tf def lowerCAmelCase ( self : Optional[Any] ) ->Optional[int]: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) snake_case_ = tf.constant(UpperCAmelCase_ ) self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (4, 3) ) , reshape(UpperCAmelCase_ , (4, 3) ).numpy() ) ) snake_case_ = np.random.randn(3 , 4 , 5 ) snake_case_ = tf.constant(UpperCAmelCase_ ) self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (12, 5) ) , reshape(UpperCAmelCase_ , (12, 5) ).numpy() ) ) @require_flax def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) snake_case_ = jnp.array(UpperCAmelCase_ ) self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (4, 3) ) , np.asarray(reshape(UpperCAmelCase_ , (4, 3) ) ) ) ) snake_case_ = np.random.randn(3 , 4 , 5 ) snake_case_ = jnp.array(UpperCAmelCase_ ) self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (12, 5) ) , np.asarray(reshape(UpperCAmelCase_ , (12, 5) ) ) ) ) def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ ) , np.squeeze(UpperCAmelCase_ ) ) ) snake_case_ = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ , axis=2 ) , np.squeeze(UpperCAmelCase_ , axis=2 ) ) ) @require_torch def lowerCAmelCase ( self : Any ) ->Union[str, Any]: """simple docstring""" snake_case_ = np.random.randn(1 , 3 , 4 ) snake_case_ = torch.tensor(UpperCAmelCase_ ) self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ ) , squeeze(UpperCAmelCase_ ).numpy() ) ) snake_case_ = np.random.randn(1 , 4 , 1 , 5 ) snake_case_ = torch.tensor(UpperCAmelCase_ ) self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ , axis=2 ) , squeeze(UpperCAmelCase_ , axis=2 ).numpy() ) ) @require_tf def lowerCAmelCase ( self : Tuple ) ->str: """simple docstring""" snake_case_ = np.random.randn(1 , 3 , 4 ) snake_case_ = tf.constant(UpperCAmelCase_ ) self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ ) , squeeze(UpperCAmelCase_ ).numpy() ) ) snake_case_ = np.random.randn(1 , 4 , 1 , 5 ) snake_case_ = tf.constant(UpperCAmelCase_ ) self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ , axis=2 ) , squeeze(UpperCAmelCase_ , axis=2 ).numpy() ) ) @require_flax def lowerCAmelCase ( self : int ) ->Union[str, Any]: """simple docstring""" snake_case_ = np.random.randn(1 , 3 , 4 ) snake_case_ = jnp.array(UpperCAmelCase_ ) self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ ) , np.asarray(squeeze(UpperCAmelCase_ ) ) ) ) snake_case_ = np.random.randn(1 , 4 , 1 , 5 ) snake_case_ = jnp.array(UpperCAmelCase_ ) self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ , 
axis=2 ) , np.asarray(squeeze(UpperCAmelCase_ , axis=2 ) ) ) ) def lowerCAmelCase ( self : Optional[Any] ) ->int: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(UpperCAmelCase_ , axis=1 ) , np.expand_dims(UpperCAmelCase_ , axis=1 ) ) ) @require_torch def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) snake_case_ = torch.tensor(UpperCAmelCase_ ) self.assertTrue(np.allclose(expand_dims(UpperCAmelCase_ , axis=1 ) , expand_dims(UpperCAmelCase_ , axis=1 ).numpy() ) ) @require_tf def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) snake_case_ = tf.constant(UpperCAmelCase_ ) self.assertTrue(np.allclose(expand_dims(UpperCAmelCase_ , axis=1 ) , expand_dims(UpperCAmelCase_ , axis=1 ).numpy() ) ) @require_flax def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = np.random.randn(3 , 4 ) snake_case_ = jnp.array(UpperCAmelCase_ ) self.assertTrue(np.allclose(expand_dims(UpperCAmelCase_ , axis=1 ) , np.asarray(expand_dims(UpperCAmelCase_ , axis=1 ) ) ) )
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) # General docstring __SCREAMING_SNAKE_CASE : int = 'MobileNetV1Config' # Base docstring __SCREAMING_SNAKE_CASE : List[Any] = 'google/mobilenet_v1_1.0_224' __SCREAMING_SNAKE_CASE : str = [1, 1_024, 7, 7] # Image classification docstring __SCREAMING_SNAKE_CASE : Union[str, Any] = 'google/mobilenet_v1_1.0_224' __SCREAMING_SNAKE_CASE : int = 'tabby, tabby cat' __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Optional[Any]: snake_case_ = {} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = model.mobilenet_va else: snake_case_ = model snake_case_ = """MobilenetV1/Conv2d_0/""" snake_case_ = backbone.conv_stem.convolution.weight snake_case_ = backbone.conv_stem.normalization.bias snake_case_ = backbone.conv_stem.normalization.weight snake_case_ = backbone.conv_stem.normalization.running_mean snake_case_ = backbone.conv_stem.normalization.running_var for i in range(13 ): snake_case_ = i + 1 snake_case_ = i * 2 snake_case_ = backbone.layer[pt_index] snake_case_ = f"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" snake_case_ = pointer.convolution.weight snake_case_ = pointer.normalization.bias snake_case_ = pointer.normalization.weight snake_case_ = pointer.normalization.running_mean snake_case_ = pointer.normalization.running_var snake_case_ = backbone.layer[pt_index + 1] snake_case_ = f"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" snake_case_ = pointer.convolution.weight snake_case_ = pointer.normalization.bias snake_case_ = pointer.normalization.weight snake_case_ = pointer.normalization.running_mean snake_case_ = pointer.normalization.running_var if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = """MobilenetV1/Logits/Conv2d_1c_1x1/""" snake_case_ = model.classifier.weight snake_case_ = model.classifier.bias return tf_to_pt_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: try: import numpy as np import tensorflow as tf except ImportError: logger.error( """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see """ """https://www.tensorflow.org/install/ for installation instructions.""" ) raise # Load weights from TF model snake_case_ = tf.train.list_variables(_SCREAMING_SNAKE_CASE ) snake_case_ = {} for name, shape in init_vars: logger.info(f"""Loading TF weight {name} with shape {shape}""" ) snake_case_ = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = array # Build TF to PyTorch weights loading map snake_case_ = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for name, pointer in tf_to_pt_map.items(): logger.info(f"""Importing {name}""" ) if name not in tf_weights: logger.info(f"""{name} not in tf pre-trained weights, skipping""" ) continue snake_case_ = tf_weights[name] if "depthwise_weights" in name: logger.info("""Transposing depthwise""" ) snake_case_ = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) ) elif "weights" in name: logger.info("""Transposing""" ) if len(pointer.shape ) == 2: # copying into linear layer snake_case_ = array.squeeze().transpose() else: snake_case_ = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(f"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" ) logger.info(f"""Initialize PyTorch weight {name} {array.shape}""" ) snake_case_ = torch.from_numpy(_SCREAMING_SNAKE_CASE ) tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + """/RMSProp""" , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + """/RMSProp_1""" , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + """/ExponentialMovingAverage""" , _SCREAMING_SNAKE_CASE ) logger.info(f"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> torch.Tensor: snake_case_ , snake_case_ = features.shape[-2:] snake_case_ , snake_case_ = conv_layer.stride snake_case_ , snake_case_ = conv_layer.kernel_size if in_height % stride_height == 0: snake_case_ = max(kernel_height - stride_height , 0 ) else: snake_case_ = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: snake_case_ = max(kernel_width - stride_width , 0 ) else: snake_case_ = max(kernel_width - (in_width % stride_width) , 0 ) snake_case_ = pad_along_width // 2 snake_case_ = pad_along_width - pad_left snake_case_ = pad_along_height // 2 snake_case_ = pad_along_height - pad_top snake_case_ = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """constant""" , 0.0 ) class __A (nn.Module): '''simple docstring''' def __init__( self : Any , UpperCAmelCase_ : MobileNetVaConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[bool or str] = True , ) ->None: """simple docstring""" super().__init__() snake_case_ = config if in_channels % groups != 0: raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) snake_case_ = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) snake_case_ = nn.Convad( in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=UpperCAmelCase_ 
, groups=UpperCAmelCase_ , bias=UpperCAmelCase_ , padding_mode="""zeros""" , ) if use_normalization: snake_case_ = nn.BatchNormad( num_features=UpperCAmelCase_ , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCAmelCase_ , track_running_stats=UpperCAmelCase_ , ) else: snake_case_ = None if use_activation: if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = ACTaFN[use_activation] elif isinstance(config.hidden_act , UpperCAmelCase_ ): snake_case_ = ACTaFN[config.hidden_act] else: snake_case_ = config.hidden_act else: snake_case_ = None def lowerCAmelCase ( self : int , UpperCAmelCase_ : torch.Tensor ) ->torch.Tensor: """simple docstring""" if self.config.tf_padding: snake_case_ = apply_tf_padding(UpperCAmelCase_ , self.convolution ) snake_case_ = self.convolution(UpperCAmelCase_ ) if self.normalization is not None: snake_case_ = self.normalization(UpperCAmelCase_ ) if self.activation is not None: snake_case_ = self.activation(UpperCAmelCase_ ) return features class __A (snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = MobileNetVaConfig __lowercase: List[str] = load_tf_weights_in_mobilenet_va __lowercase: Tuple = """mobilenet_v1""" __lowercase: List[Any] = """pixel_values""" __lowercase: Union[str, Any] = False def lowerCAmelCase ( self : str , UpperCAmelCase_ : Union[nn.Linear, nn.Convad] ) ->None: """simple docstring""" if isinstance(UpperCAmelCase_ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(UpperCAmelCase_ , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __SCREAMING_SNAKE_CASE : int = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __SCREAMING_SNAKE_CASE : List[Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( """The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , snake_case__ , ) class __A (snake_case__): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : MobileNetVaConfig , UpperCAmelCase_ : bool = True ) ->List[Any]: """simple docstring""" super().__init__(UpperCAmelCase_ ) snake_case_ = config snake_case_ = 32 snake_case_ = max(int(depth * config.depth_multiplier ) , config.min_depth ) snake_case_ = MobileNetVaConvLayer( UpperCAmelCase_ , in_channels=config.num_channels , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=2 , ) snake_case_ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] snake_case_ = nn.ModuleList() for i in range(13 ): snake_case_ = out_channels if strides[i] == 2 or i == 0: depth *= 2 snake_case_ = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase_ , ) ) self.layer.append( MobileNetVaConvLayer( UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=1 , ) ) snake_case_ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" raise NotImplementedError @add_start_docstrings_to_model_forward(UpperCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , ) ->Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: """simple docstring""" snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) snake_case_ = self.conv_stem(UpperCAmelCase_ ) snake_case_ = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): snake_case_ = layer_module(UpperCAmelCase_ ) if output_hidden_states: snake_case_ = all_hidden_states + (hidden_states,) snake_case_ = hidden_states if self.pooler is not None: snake_case_ = torch.flatten(self.pooler(UpperCAmelCase_ ) , start_dim=1 ) else: snake_case_ = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , ) @add_start_docstrings( """ MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" , snake_case__ , ) class __A (snake_case__): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : MobileNetVaConfig ) ->None: """simple docstring""" super().__init__(UpperCAmelCase_ ) snake_case_ = config.num_labels snake_case_ = MobileNetVaModel(UpperCAmelCase_ ) snake_case_ = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head snake_case_ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase_ ) snake_case_ = nn.Linear(UpperCAmelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , ) ->Union[tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.mobilenet_va(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ ) snake_case_ = outputs.pooler_output if return_dict else outputs[1] snake_case_ = self.classifier(self.dropout(UpperCAmelCase_ ) ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = """single_label_classification""" else: snake_case_ = """multi_label_classification""" if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , )
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() __SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:]) __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""") __SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : Dict = { 'configuration_roberta_prelayernorm': [ 'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaPreLayerNormConfig', 'RobertaPreLayerNormOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Any = [ 'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST', 'RobertaPreLayerNormForCausalLM', 'RobertaPreLayerNormForMaskedLM', 'RobertaPreLayerNormForMultipleChoice', 'RobertaPreLayerNormForQuestionAnswering', 'RobertaPreLayerNormForSequenceClassification', 'RobertaPreLayerNormForTokenClassification', 'RobertaPreLayerNormModel', 'RobertaPreLayerNormPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : str = [ 'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRobertaPreLayerNormForCausalLM', 'TFRobertaPreLayerNormForMaskedLM', 'TFRobertaPreLayerNormForMultipleChoice', 'TFRobertaPreLayerNormForQuestionAnswering', 'TFRobertaPreLayerNormForSequenceClassification', 'TFRobertaPreLayerNormForTokenClassification', 'TFRobertaPreLayerNormMainLayer', 'TFRobertaPreLayerNormModel', 'TFRobertaPreLayerNormPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : str = [ 'FlaxRobertaPreLayerNormForCausalLM', 'FlaxRobertaPreLayerNormForMaskedLM', 'FlaxRobertaPreLayerNormForMultipleChoice', 'FlaxRobertaPreLayerNormForQuestionAnswering', 'FlaxRobertaPreLayerNormForSequenceClassification', 'FlaxRobertaPreLayerNormForTokenClassification', 'FlaxRobertaPreLayerNormModel', 'FlaxRobertaPreLayerNormPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, 
FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
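# --- Added note (not part of the original module) -------------------------------
# The `_LazyModule` replacement above means importing this package is cheap:
# torch/tf/flax submodules are only imported when one of their attributes is
# first accessed. A hedged usage sketch, assuming `transformers` is installed:
#
#     import transformers.models.roberta_prelayernorm as rpn
#
#     cfg_cls = rpn.RobertaPreLayerNormConfig  # triggers the real import here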
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : List[str] = 'tiny-wmt19-en-ru' # Build # borrowed from a test __SCREAMING_SNAKE_CASE : List[str] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] __SCREAMING_SNAKE_CASE : Union[str, Any] = dict(zip(vocab, range(len(vocab)))) __SCREAMING_SNAKE_CASE : List[str] = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE : Optional[int] = Path(tmpdirname) __SCREAMING_SNAKE_CASE : str = build_dir / VOCAB_FILES_NAMES['src_vocab_file'] __SCREAMING_SNAKE_CASE : Optional[int] = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file'] __SCREAMING_SNAKE_CASE : Tuple = build_dir / VOCAB_FILES_NAMES['merges_file'] with open(src_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, 'w') as fp: fp.write('\n'.join(merges)) __SCREAMING_SNAKE_CASE : Union[str, Any] = FSMTTokenizer( langs=['en', 'ru'], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) __SCREAMING_SNAKE_CASE : Any = FSMTConfig( langs=['ru', 'en'], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) __SCREAMING_SNAKE_CASE : Optional[int] = FSMTForConditionalGeneration(config) print(f"""num of params {tiny_model.num_parameters()}""") # Test __SCREAMING_SNAKE_CASE : int = tokenizer(['Making tiny model'], return_tensors='pt') __SCREAMING_SNAKE_CASE : List[Any] = tiny_model(**batch) print('test output:', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f"""Generated {mname_tiny}""") # Upload # transformers-cli upload tiny-wmt19-en-ru
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = rotary_dim snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = None snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = FlaxGPTJModelTester(self ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @tooslow def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = False snake_case_ = model.config.eos_token_id snake_case_ = jax.jit(model.generate ) snake_case_ = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case_ = 
tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @is_pt_flax_cross_test def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ ) snake_case_ = fx_state with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ ) snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ 
).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
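# Illustrative sketch (not part of the test file above): the incremental,
# cache-backed forward pass that check_use_cache_forward exercises. The helper
# name below is hypothetical; init_cache and the call signature follow the
# Flax causal-LM API used in the tests.
import jax.numpy as jnp

from transformers import FlaxGPTJForCausalLM


def cached_forward_sketch(model: FlaxGPTJForCausalLM, input_ids, max_decoder_length: int):
    batch_size, prompt_length = input_ids.shape
    # Pre-allocate key/value caches for max_decoder_length positions.
    past_key_values = model.init_cache(batch_size, max_decoder_length)
    attention_mask = jnp.ones((batch_size, max_decoder_length), dtype="i4")
    position_ids = jnp.broadcast_to(jnp.arange(prompt_length)[None, :], (batch_size, prompt_length))
    # Prime the cache with the prompt; later tokens can then be fed one at a
    # time with position_ids pointing at the next slot, as in the test above.
    outputs = model(
        input_ids,
        attention_mask=attention_mask,
        past_key_values=past_key_values,
        position_ids=position_ids,
    )
    return outputs.logits, outputs.past_key_values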
"""simple docstring""" import cmath import math def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> complex: snake_case_ = math.radians(_SCREAMING_SNAKE_CASE ) snake_case_ = math.radians(_SCREAMING_SNAKE_CASE ) # Convert voltage and current to rectangular form snake_case_ = cmath.rect(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = cmath.rect(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
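# Illustrative sketch of the label-masking step performed inside
# _map_to_encoder_decoder_inputs above: padding positions in the labels are
# replaced by -100, the default ignore_index of torch.nn.CrossEntropyLoss,
# so they do not contribute to the loss.
def mask_pad_tokens(labels: list[int], pad_token_id: int) -> list[int]:
    return [-100 if token == pad_token_id else token for token in labels]


assert mask_pad_tokens([5, 7, 0, 0], pad_token_id=0) == [5, 7, -100, -100]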
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = """ylacombe/bark-small""" snake_case_ = tempfile.mkdtemp() snake_case_ = """en_speaker_1""" snake_case_ = """This is a test string""" snake_case_ = """speaker_embeddings_path.json""" snake_case_ = """speaker_embeddings""" def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case_ = 35 snake_case_ = 2 snake_case_ = 8 snake_case_ = { """semantic_prompt""": np.ones(UpperCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case_ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string ) snake_case_ = tokenizer( self.input_string , padding="""max_length""" 
, max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
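# Illustrative usage sketch for the processor tested above; the checkpoint and
# voice preset names are taken from the test fixtures, everything else is an
# assumption for illustration.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
# `inputs` carries the tokenized text plus a "history_prompt" dict holding the
# semantic/coarse/fine prompt arrays for the chosen speaker.
history_prompt = inputs["history_prompt"]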
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
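# Illustrative sketch of the pattern the tests above exercise: declare a
# dataclass, hand it to HfArgumentParser, then parse either argv-style
# arguments or a plain dict. The dataclass below is hypothetical.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ExampleArguments:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


parser = HfArgumentParser(ExampleArguments)
(example_args,) = parser.parse_args_into_dataclasses(["--foo", "7"])
assert example_args.foo == 7 and example_args.baz == "toto"
(from_dict_args,) = parser.parse_dict({"foo": 1, "baz": "titi"})
assert from_dict_args.baz == "titi"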
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : Optional[Any] ) ->List[str]: """simple docstring""" snake_case_ = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) snake_case_ = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" snake_case_ = model(UpperCAmelCase_ )["""last_hidden_state"""] snake_case_ = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , UpperCAmelCase_ ) # compare the actual values for a slice. snake_case_ = tf.convert_to_tensor( [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
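# Illustrative sketch of how the helpers above are typically driven through
# accelerate's public load_and_quantize_model wrapper; the model class and the
# weights location are assumptions for illustration only.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
with init_empty_weights():
    empty_model = MyModel()  # hypothetical model, built on the meta device
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_quantization_config,
    weights_location="path/to/weights",  # folder containing the checkpoint
    device_map="auto",
)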
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node __SCREAMING_SNAKE_CASE : str = 4 __SCREAMING_SNAKE_CASE : Dict = 3 class __A (snake_case__): '''simple docstring''' pass def _a ( _SCREAMING_SNAKE_CASE ) -> int: for shard in shards: for i in range(_SCREAMING_SNAKE_CASE ): yield {"i": i, "shard": shard} def _a ( ) -> Optional[int]: snake_case_ = int(os.environ["""RANK"""] ) snake_case_ = int(os.environ["""WORLD_SIZE"""] ) snake_case_ = ArgumentParser() parser.add_argument("""--streaming""" , type=_SCREAMING_SNAKE_CASE ) parser.add_argument("""--local_rank""" , type=_SCREAMING_SNAKE_CASE ) parser.add_argument("""--num_workers""" , type=_SCREAMING_SNAKE_CASE , default=0 ) snake_case_ = parser.parse_args() snake_case_ = args.streaming snake_case_ = args.num_workers snake_case_ = {"""shards""": [f"""shard_{shard_idx}""" for shard_idx in range(_SCREAMING_SNAKE_CASE )]} snake_case_ = IterableDataset.from_generator(_SCREAMING_SNAKE_CASE , gen_kwargs=_SCREAMING_SNAKE_CASE ) if not streaming: snake_case_ = Dataset.from_list(list(_SCREAMING_SNAKE_CASE ) ) snake_case_ = split_dataset_by_node(_SCREAMING_SNAKE_CASE , rank=_SCREAMING_SNAKE_CASE , world_size=_SCREAMING_SNAKE_CASE ) snake_case_ = torch.utils.data.DataLoader(_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE ) snake_case_ = NUM_SHARDS * NUM_ITEMS_PER_SHARD snake_case_ = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) snake_case_ = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
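A minimal, non-interactive driver for the Bellman-Ford routine above, using a small hypothetical four-vertex edge list instead of stdin.

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
    {"src": 1, "dst": 3, "weight": 1},
]
# 0 -> 2 -> 1 -> 3 is the cheapest route to vertex 3.
print(bellman_ford(edges, vertex_count=4, edge_count=len(edges), src=0))
# [0.0, 3.0, 1.0, 4.0]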
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = 
os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
347
1
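A hedged sketch of the behaviour the tokenizer file above implements, assuming network access plus the upstream `transformers` and `sentencepiece` packages.

from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")
ids = tok("A tiny test").input_ids
# build_inputs_with_special_tokens appends an EOS id, as the code above shows.
print(ids[-1] == tok.eos_token_id)  # True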
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __SCREAMING_SNAKE_CASE : int = 250_004 __SCREAMING_SNAKE_CASE : Union[str, Any] = 250_020 @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: str = MBartTokenizer __lowercase: Tuple = MBartTokenizerFast __lowercase: int = True __lowercase: Tuple = True def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = MBartTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" snake_case_ = MBartTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) snake_case_ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return snake_case_ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = tempfile.mkdtemp() snake_case_ = 
tokenizer_r.save_pretrained(UpperCAmelCase_ ) snake_case_ = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) snake_case_ = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(UpperCAmelCase_ ) snake_case_ = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=True snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ ) snake_case_ = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(UpperCAmelCase_ ) snake_case_ = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=False snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ ) snake_case_ = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(UpperCAmelCase_ ) snake_case_ = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class __A (unittest.TestCase): '''simple docstring''' __lowercase: List[str] = """facebook/mbart-large-en-ro""" __lowercase: List[str] = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] __lowercase: str = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] __lowercase: Optional[int] = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE] @classmethod def lowerCAmelCase ( cls : int ) ->str: """simple docstring""" snake_case_ = MBartTokenizer.from_pretrained( 
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) snake_case_ = 1 return cls def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple: """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 ) def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->int: """simple docstring""" self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids ) snake_case_ = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2] snake_case_ = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , UpperCAmelCase_ ) snake_case_ = 10 snake_case_ = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : Tuple ) ->Optional[int]: """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_026, 250_001] ) def lowerCAmelCase ( self : Optional[Any] ) ->int: """simple docstring""" snake_case_ = tempfile.mkdtemp() snake_case_ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCAmelCase_ ) snake_case_ = MBartTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_ ) @require_torch def lowerCAmelCase ( self : List[str] ) ->Tuple: """simple docstring""" snake_case_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , return_tensors="""pt""" ) snake_case_ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def lowerCAmelCase ( self : List[Any] ) ->int: """simple docstring""" snake_case_ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) snake_case_ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) snake_case_ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) 
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors="""pt""" ) snake_case_ = self.tokenizer( text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=10 , return_tensors="""pt""" ) snake_case_ = targets["""input_ids"""] snake_case_ = shift_tokens_right(UpperCAmelCase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCAmelCase ( self : str ) ->int: """simple docstring""" snake_case_ = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(UpperCAmelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3_034, 2, 250_004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250_001, } , )
347
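A hedged sketch of the source-side encoding that the tests above assert (sequences ending in EOS followed by the language code), assuming network access to the public checkpoint.

from transformers import MBartTokenizer

tok = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
ids = tok("UN Chief says there is no military solution").input_ids
# Source sequences end with [eos, src_lang_code], mirroring the assertions above.
print(ids[-2:] == [tok.eos_token_id, tok.lang_code_to_id["en_XX"]])  # True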
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int: snake_case_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
347
1
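A quick sanity check of the totient sieve above against a brute-force coprime count, assuming `solution` from that file is in scope.

from math import gcd

brute = sum(sum(1 for k in range(1, n) if gcd(n, k) == 1) for n in range(2, 11))
print(solution(10), brute)  # 31 31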
"""simple docstring""" from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
347
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
1
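A hedged sketch for the collator exports a few rows above, assuming `transformers` and `torch` are installed; the model name below is just a convenient public checkpoint.

from transformers import AutoTokenizer, DataCollatorWithPadding

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
collate = DataCollatorWithPadding(tokenizer=tok, return_tensors="pt")
batch = collate([tok("short"), tok("a noticeably longer sentence")])
print(batch["input_ids"].shape)  # (2, length of the longest sequence in the batch)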
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: snake_case_ = 1.5 snake_case_ = int(factor * num_class_images ) snake_case_ = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 ) os.makedirs(f"""{class_data_dir}/images""" , exist_ok=_SCREAMING_SNAKE_CASE ) if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: snake_case_ = client.query(text=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) >= factor * num_class_images or num_images > 1E4: break else: snake_case_ = int(factor * num_images ) snake_case_ = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 , ) snake_case_ = 0 snake_case_ = 0 snake_case_ = tqdm(desc="""downloading real regularization images""" , total=_SCREAMING_SNAKE_CASE ) with open(f"""{class_data_dir}/caption.txt""" , """w""" ) as fa, open(f"""{class_data_dir}/urls.txt""" , """w""" ) as fa, open( f"""{class_data_dir}/images.txt""" , """w""" ) as fa: while total < num_class_images: snake_case_ = class_images[count] count += 1 try: snake_case_ = requests.get(images["""url"""] ) if img.status_code == 200: snake_case_ = Image.open(BytesIO(img.content ) ) with open(f"""{class_data_dir}/images/{total}.jpg""" , """wb""" ) as f: f.write(img.content ) fa.write(images["""caption"""] + """\n""" ) fa.write(images["""url"""] + """\n""" ) fa.write(f"""{class_data_dir}/images/{total}.jpg""" + """\n""" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def _a ( ) -> Any: snake_case_ = argparse.ArgumentParser("""""" , add_help=_SCREAMING_SNAKE_CASE ) parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE ) parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE ) parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=_SCREAMING_SNAKE_CASE ) return parser.parse_args() if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
347
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = 
num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
347
1
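The shard-counting helper in the TPU script above relies on a `-<shard>-<num_samples>.tfrecord` naming convention; a stdlib-only illustration with a hypothetical file name.

import re

name = "train-00007-52000.tfrecord"  # hypothetical shard name
match = re.search(r"-\d+-(\d+)\.tfrecord", name)
print(int(match.group(1)))  # 52000 samples encoded in the file name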
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class __A : '''simple docstring''' __lowercase: List[str] __lowercase: Optional[str] = None # Automatically constructed __lowercase: ClassVar[str] = "dict" __lowercase: ClassVar[Any] = None __lowercase: str = field(default="""Translation""" , init=snake_case__ , repr=snake_case__) def __call__( self : str ) ->int: """simple docstring""" return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCAmelCase ( self : str ) ->Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class __A : '''simple docstring''' __lowercase: Optional[List] = None __lowercase: Optional[int] = None __lowercase: Optional[str] = None # Automatically constructed __lowercase: ClassVar[str] = "dict" __lowercase: ClassVar[Any] = None __lowercase: str = field(default="""TranslationVariableLanguages""" , init=snake_case__ , repr=snake_case__) def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = sorted(set(self.languages ) ) if self.languages else None snake_case_ = len(self.languages ) if self.languages else None def __call__( self : str ) ->str: """simple docstring""" return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Union[str, Any] ) ->Any: """simple docstring""" snake_case_ = set(self.languages ) if self.languages and set(UpperCAmelCase_ ) - lang_set: raise ValueError( F"""Some languages in example ({", ".join(sorted(set(UpperCAmelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(UpperCAmelCase_ )}).""" ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. snake_case_ = [] for lang, text in translation_dict.items(): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. snake_case_ , snake_case_ = zip(*sorted(UpperCAmelCase_ ) ) return {"language": languages, "translation": translations} def lowerCAmelCase ( self : Any ) ->Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
347
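A minimal sketch of the feature above in use, via the public `datasets` API (assumed installed).

from datasets import Dataset, Features, Translation, Value

features = Features({
    "id": Value("int32"),
    "translation": Translation(languages=["de", "en"]),
})
ds = Dataset.from_dict(
    {"id": [0], "translation": [{"de": "Hallo", "en": "Hello"}]},
    features=features,
)
print(ds[0]["translation"])  # {'de': 'Hallo', 'en': 'Hello'}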
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float: if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
347
1
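Example values for the fluid formula above: water at roughly 998 kg/m3 with a bulk modulus near 2.15 GPa gives about 1468 m/s.

print(_a(density=998, bulk_modulus=2.15e9))  # ~1467.8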
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __A (snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = (IPNDMScheduler,) __lowercase: str = (("""num_inference_steps""", 50),) def lowerCAmelCase ( self : Tuple , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" snake_case_ = {"""num_train_timesteps""": 1_000} config.update(**UpperCAmelCase_ ) return config def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = dict(self.forward_default_kwargs ) snake_case_ = kwargs.pop("""num_inference_steps""" , UpperCAmelCase_ ) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config(**UpperCAmelCase_ ) snake_case_ = scheduler_class(**UpperCAmelCase_ ) scheduler.set_timesteps(UpperCAmelCase_ ) # copy over dummy past residuals snake_case_ = dummy_past_residuals[:] if time_step is None: snake_case_ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase_ ) snake_case_ = scheduler_class.from_pretrained(UpperCAmelCase_ ) new_scheduler.set_timesteps(UpperCAmelCase_ ) # copy over dummy past residuals snake_case_ = dummy_past_residuals[:] snake_case_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample snake_case_ = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" snake_case_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample snake_case_ = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]: """simple docstring""" pass def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[Any]=0 , **UpperCAmelCase_ : Union[str, Any] ) ->Any: """simple docstring""" snake_case_ = dict(self.forward_default_kwargs ) snake_case_ = kwargs.pop("""num_inference_steps""" , UpperCAmelCase_ ) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config() snake_case_ = scheduler_class(**UpperCAmelCase_ ) scheduler.set_timesteps(UpperCAmelCase_ ) # copy over dummy past residuals (must be after setting timesteps) snake_case_ = dummy_past_residuals[:] if time_step is None: snake_case_ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase_ ) snake_case_ = scheduler_class.from_pretrained(UpperCAmelCase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCAmelCase_ ) # copy over dummy past residual (must be after setting timesteps) snake_case_ = dummy_past_residuals[:] snake_case_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample snake_case_ = 
new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" snake_case_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample snake_case_ = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase ( self : int , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]: """simple docstring""" snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(**UpperCAmelCase_ ) snake_case_ = scheduler_class(**UpperCAmelCase_ ) snake_case_ = 10 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter scheduler.set_timesteps(UpperCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): snake_case_ = model(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): snake_case_ = model(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample return sample def lowerCAmelCase ( self : Any ) ->Any: """simple docstring""" snake_case_ = dict(self.forward_default_kwargs ) snake_case_ = kwargs.pop("""num_inference_steps""" , UpperCAmelCase_ ) for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config() snake_case_ = scheduler_class(**UpperCAmelCase_ ) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCAmelCase_ , """set_timesteps""" ): scheduler.set_timesteps(UpperCAmelCase_ ) elif num_inference_steps is not None and not hasattr(UpperCAmelCase_ , """set_timesteps""" ): snake_case_ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] snake_case_ = dummy_past_residuals[:] snake_case_ = scheduler.timesteps[5] snake_case_ = scheduler.timesteps[6] snake_case_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample snake_case_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) snake_case_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample snake_case_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase_ , time_step=UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=UpperCAmelCase_ , time_step=UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->List[Any]: """simple docstring""" snake_case_ = self.full_loop() snake_case_ = torch.mean(torch.abs(UpperCAmelCase_ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
347
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> bool: if num < 0: return False snake_case_ = num snake_case_ = 0 while num > 0: snake_case_ = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
1
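Spot checks for the palindrome test above.

print(_a(121))   # True
print(_a(-121))  # False: negatives are rejected up front
print(_a(10))    # False: the reversal is 1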
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : str = { 'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'], 'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'], 'processing_wav2vec2': ['Wav2Vec2Processor'], 'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : int = [ 'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Wav2Vec2ForAudioFrameClassification', 'Wav2Vec2ForCTC', 'Wav2Vec2ForMaskedLM', 'Wav2Vec2ForPreTraining', 'Wav2Vec2ForSequenceClassification', 'Wav2Vec2ForXVector', 'Wav2Vec2Model', 'Wav2Vec2PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = [ 'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWav2Vec2ForCTC', 'TFWav2Vec2Model', 'TFWav2Vec2PreTrainedModel', 'TFWav2Vec2ForSequenceClassification', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : int = [ 'FlaxWav2Vec2ForCTC', 'FlaxWav2Vec2ForPreTraining', 'FlaxWav2Vec2Model', 'FlaxWav2Vec2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
347
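A tiny sketch against the public API that the init file above mirrors, assuming `transformers` is installed.

from transformers import Wav2Vec2Config

cfg = Wav2Vec2Config()
print(cfg.model_type)  # "wav2vec2"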
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = SpeechTaTokenizer __lowercase: int = False __lowercase: List[str] = True def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ ) snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) snake_case_ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = """this is a test""" snake_case_ = """this is a test""" return input_text, output_text def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = """<pad>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCAmelCase_ ) , 81 ) def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size + 
len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off snake_case_ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
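A rough usage sketch of the tokenizer these tests exercise. The dump spells the class SpeechTaTokenizer; the public class in transformers is SpeechT5Tokenizer, and microsoft/speecht5_asr is the checkpoint named by the slow integration test above; the snippet assumes both are available.

from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
tokens = tokenizer.tokenize("This is a test")  # character-level pieces with "▁" word boundaries
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokenizer.decode(ids))  # round-trips back to "This is a test"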
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
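For reference, each placeholder above follows the library's dummy-object pattern: instantiation fails fast when the backend is missing. A minimal sketch of that mechanism (the class name below is hypothetical, and the backend list normally lives in a _backends attribute, which this dump has renamed to __lowercase):

from transformers.utils import DummyObject, requires_backends


class HypotheticalSentencePieceTokenizer(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        # Raises an ImportError that names the missing backend.
        requires_backends(self, ["sentencepiece"])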
"""simple docstring""" import datasets __SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' __SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' __SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int: """simple docstring""" return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : str = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[int] = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys __SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
347
1
"""simple docstring""" import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class __A (snake_case__): '''simple docstring''' def lowerCAmelCase ( self : List[Any] ) ->Optional[int]: """simple docstring""" snake_case_ = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" with self.assertRaises(UpperCAmelCase_ ): snake_case_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" with self.assertRaises(UpperCAmelCase_ ): snake_case_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def lowerCAmelCase ( self : Tuple ) ->Union[str, Any]: """simple docstring""" with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): snake_case_ = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def lowerCAmelCase ( self : List[str] ) ->Tuple: """simple docstring""" snake_case_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def lowerCAmelCase ( self : List[str] ) ->Optional[int]: """simple docstring""" snake_case_ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def lowerCAmelCase ( self : int ) ->Union[str, Any]: """simple docstring""" snake_case_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def lowerCAmelCase ( self : Optional[Any] ) ->int: """simple docstring""" with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): snake_case_ = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def lowerCAmelCase ( self : Optional[Any] ) ->int: """simple docstring""" snake_case_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def lowerCAmelCase ( self : List[str] ) ->Tuple: """simple docstring""" import PIL.Image snake_case_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=UpperCAmelCase_ ) as mock_cast_to_python_objects: snake_case_ = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) ) snake_case_ , snake_case_ = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , UpperCAmelCase_ ) 
self.assertFalse(kwargs["""optimize_list_casting"""] ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = pa.BufferReader(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , pa.Buffer ) else pa.memory_map(_SCREAMING_SNAKE_CASE ) snake_case_ = pa.ipc.open_stream(_SCREAMING_SNAKE_CASE ) snake_case_ = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = pa.BufferOutputStream() snake_case_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None with ArrowWriter(stream=_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) snake_case_ , snake_case_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: snake_case_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def _a ( ) -> Optional[Any]: snake_case_ = pa.BufferOutputStream() snake_case_ = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) snake_case_ , snake_case_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata snake_case_ = pa.BufferReader(output.getvalue() ) snake_case_ = pa.ipc.open_stream(_SCREAMING_SNAKE_CASE ) snake_case_ = f.read_all() snake_case_ = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(_SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) def _a ( _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = pa.BufferOutputStream() with ArrowWriter( stream=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE , hash_salt="""split_name""" , check_duplicates=_SCREAMING_SNAKE_CASE , ) as writer: with pytest.raises(_SCREAMING_SNAKE_CASE ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) snake_case_ , snake_case_ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = pa.BufferOutputStream() with ArrowWriter( stream=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE , hash_salt="""split_name""" , check_duplicates=_SCREAMING_SNAKE_CASE , ) as writer: with pytest.raises(_SCREAMING_SNAKE_CASE ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 ) snake_case_ , snake_case_ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def _a ( _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = pa.BufferOutputStream() with 
ArrowWriter( stream=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE , hash_salt="""split_name""" , check_duplicates=_SCREAMING_SNAKE_CASE , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) snake_case_ , snake_case_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = pa.BufferOutputStream() snake_case_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None with ArrowWriter(stream=_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) snake_case_ , snake_case_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: snake_case_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: snake_case_ = pa.BufferOutputStream() snake_case_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None with ArrowWriter(stream=_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) snake_case_ , snake_case_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: snake_case_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: snake_case_ = pa.BufferOutputStream() snake_case_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None with ArrowWriter(stream=_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) snake_case_ , snake_case_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: snake_case_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata ) _check_output(output.getvalue() , 
expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def _a ( ) -> int: with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """test.arrow""" ) with ArrowWriter(path=_SCREAMING_SNAKE_CASE , schema=pa.schema(_SCREAMING_SNAKE_CASE ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) snake_case_ , snake_case_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata ) _check_output(_SCREAMING_SNAKE_CASE , 1 ) def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if pa.types.is_list(_SCREAMING_SNAKE_CASE ): return get_base_dtype(arr_type.value_type ) else: return arr_type def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: if isinstance(lst[0] , _SCREAMING_SNAKE_CASE ): change_first_primitive_element_in_list(lst[0] , _SCREAMING_SNAKE_CASE ) else: snake_case_ = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: snake_case_ = pa.array(TypedSequence(_SCREAMING_SNAKE_CASE , optimized_int_type=_SCREAMING_SNAKE_CASE ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: # in range snake_case_ = pa.array(OptimizedTypedSequence(_SCREAMING_SNAKE_CASE , col=_SCREAMING_SNAKE_CASE ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications snake_case_ = copy.deepcopy(_SCREAMING_SNAKE_CASE ) snake_case_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = pa.array(OptimizedTypedSequence(_SCREAMING_SNAKE_CASE , col=_SCREAMING_SNAKE_CASE ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=_SCREAMING_SNAKE_CASE ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = """mock://dataset-train.arrow""" with ArrowWriter(path=_SCREAMING_SNAKE_CASE , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(_SCREAMING_SNAKE_CASE ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) snake_case_ , snake_case_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(_SCREAMING_SNAKE_CASE ) def _a ( ) -> Optional[int]: snake_case_ = 
pa.BufferOutputStream() with ParquetWriter(stream=_SCREAMING_SNAKE_CASE ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) snake_case_ , snake_case_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 snake_case_ = pa.BufferReader(output.getvalue() ) snake_case_ = pq.read_table(_SCREAMING_SNAKE_CASE ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: import PIL.Image snake_case_ = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_SCREAMING_SNAKE_CASE , format="""png""" ) snake_case_ = pa.BufferOutputStream() with ParquetWriter( stream=_SCREAMING_SNAKE_CASE , features=Features({"""image""": Image()} ) , embed_local_files=_SCREAMING_SNAKE_CASE ) as writer: writer.write({"""image""": image_path} ) writer.finalize() snake_case_ = pa.BufferReader(output.getvalue() ) snake_case_ = pq.read_table(_SCREAMING_SNAKE_CASE ) snake_case_ = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , _SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def _a ( ) -> List[Any]: snake_case_ = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_SCREAMING_SNAKE_CASE )] ) snake_case_ = pa.BufferOutputStream() with ArrowWriter(stream=_SCREAMING_SNAKE_CASE ) as writer: writer._build_writer(inferred_schema=_SCREAMING_SNAKE_CASE ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
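A minimal standalone version of the pattern the tests above repeat: write two rows to an in-memory Arrow stream, finalize, and read the table back.

import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}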
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None: """simple docstring""" warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
"""simple docstring""" from typing import Any def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> list: _validation( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) # Creates data structures and fill initial step snake_case_ = {} snake_case_ = {} for state in states_space: snake_case_ = observations_space[0] snake_case_ = ( initial_probabilities[state] * emission_probabilities[state][observation] ) snake_case_ = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(_SCREAMING_SNAKE_CASE ) ): snake_case_ = observations_space[o] snake_case_ = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function snake_case_ = """""" snake_case_ = -1 for k_state in states_space: snake_case_ = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: snake_case_ = probability snake_case_ = k_state # Update probabilities and pointers dicts snake_case_ = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) snake_case_ = arg_max # The final observation snake_case_ = observations_space[len(_SCREAMING_SNAKE_CASE ) - 1] # argmax for given final observation snake_case_ = """""" snake_case_ = -1 for k_state in states_space: snake_case_ = probabilities[(k_state, final_observation)] if probability > max_probability: snake_case_ = probability snake_case_ = k_state snake_case_ = arg_max # Process pointers backwards snake_case_ = last_state snake_case_ = [] for o in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ): result.append(_SCREAMING_SNAKE_CASE ) snake_case_ = pointers[previous, observations_space[o]] result.reverse() return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> None: _validate_not_empty( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) _validate_lists(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _validate_dicts( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> None: if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None: _validate_list(_SCREAMING_SNAKE_CASE , """observations_space""" ) _validate_list(_SCREAMING_SNAKE_CASE , """states_space""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None: if not isinstance(_object , _SCREAMING_SNAKE_CASE ): snake_case_ = f"""{var_name} must be a list""" raise ValueError(_SCREAMING_SNAKE_CASE ) else: for x in _object: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = f"""{var_name} must be a list of strings""" raise ValueError(_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> None: _validate_dict(_SCREAMING_SNAKE_CASE , """initial_probabilities""" , _SCREAMING_SNAKE_CASE ) 
_validate_nested_dict(_SCREAMING_SNAKE_CASE , """transition_probabilities""" ) _validate_nested_dict(_SCREAMING_SNAKE_CASE , """emission_probabilities""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None: _validate_dict(_object , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for x in _object.values(): _validate_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) -> None: if not isinstance(_object , _SCREAMING_SNAKE_CASE ): snake_case_ = f"""{var_name} must be a dict""" raise ValueError(_SCREAMING_SNAKE_CASE ) if not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for x in _object ): snake_case_ = f"""{var_name} all keys must be strings""" raise ValueError(_SCREAMING_SNAKE_CASE ) if not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for x in _object.values() ): snake_case_ = """nested dictionary """ if nested else """""" snake_case_ = f"""{var_name} {nested_text}all values must be {value_type.__name__}""" raise ValueError(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": from doctest import testmod testmod()
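A usage sketch for the decoder above on the classic healthy/fever HMM, assuming the main function is bound to a readable name such as viterbi (in the record as shown, every function has been renamed to _a, which also breaks the internal calls to _validation and the other helpers):

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial = {"Healthy": 0.6, "Fever": 0.4}
transition = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, initial, transition, emission))
# ['Healthy', 'Healthy', 'Fever']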
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , 
outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
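A quick way to sanity-check the exported weights of a fine-tuned (non-in21k) checkpoint is to reload them through the standard classes and classify the same COCO test image used above. This is an illustrative sketch only; the dump folder name is a placeholder for whatever was passed as --pytorch_dump_folder_path.

import requests
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

dump_folder = "./vit_base_patch16_224"  # placeholder: the --pytorch_dump_folder_path used above
model = ViTForImageClassification.from_pretrained(dump_folder)
processor = ViTImageProcessor.from_pretrained(dump_folder)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # expect a cat-related label for this image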
"""simple docstring""" import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: str = CanineTokenizer __lowercase: Union[str, Any] = False def lowerCAmelCase ( self : Optional[int] ) ->Any: """simple docstring""" super().setUp() snake_case_ = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCAmelCase ( self : str ) ->int: """simple docstring""" return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->CanineTokenizer: """simple docstring""" snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) snake_case_ = 1_024 return tokenizer @require_torch def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.canine_tokenizer snake_case_ = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off snake_case_ = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0] # fmt: on snake_case_ = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = list(batch.input_ids.numpy()[0] ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowerCAmelCase ( self : int ) ->Union[str, Any]: """simple docstring""" snake_case_ = self.canine_tokenizer snake_case_ = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] snake_case_ = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , UpperCAmelCase_ ) self.assertIn("""attention_mask""" , UpperCAmelCase_ ) self.assertIn("""token_type_ids""" , UpperCAmelCase_ ) @require_torch def lowerCAmelCase ( self : Dict ) ->List[Any]: """simple docstring""" snake_case_ = self.canine_tokenizer snake_case_ = [ """What's the weater?""", """It's about 25 degrees.""", ] snake_case_ = tokenizer( text_target=UpperCAmelCase_ , max_length=32 , padding="""max_length""" , truncation=UpperCAmelCase_ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowerCAmelCase ( self : List[str] ) ->int: """simple docstring""" snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc snake_case_ = tempfile.mkdtemp() snake_case_ = """ He is very happy, UNwant\u00E9d,running""" snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) tokenizer.save_pretrained(UpperCAmelCase_ ) snake_case_ = 
tokenizer.__class__.from_pretrained(UpperCAmelCase_ ) snake_case_ = after_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) shutil.rmtree(UpperCAmelCase_ ) snake_case_ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc snake_case_ = tempfile.mkdtemp() snake_case_ = """ He is very happy, UNwant\u00E9d,running""" snake_case_ = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: snake_case_ = chr(0XE007 ) additional_special_tokens.append(UpperCAmelCase_ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) tokenizer.save_pretrained(UpperCAmelCase_ ) snake_case_ = tokenizer.__class__.from_pretrained(UpperCAmelCase_ ) snake_case_ = after_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIn(UpperCAmelCase_ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) snake_case_ = tokenizer.__class__.from_pretrained(UpperCAmelCase_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCAmelCase_ ) def lowerCAmelCase ( self : Tuple ) ->int: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ , snake_case_ = self.get_clean_sequence(UpperCAmelCase_ ) # a special token for Canine can be defined as follows: snake_case_ = 0XE005 snake_case_ = chr(UpperCAmelCase_ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ) , 1 ) snake_case_ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , input_encoded + special_token_id ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) self.assertTrue(special_token not in decoded ) def lowerCAmelCase ( self : List[str] ) ->Optional[int]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = chr(0XE005 ) snake_case_ = chr(0XE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=UpperCAmelCase_ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) snake_case_ = tokenizer.tokenize(UpperCAmelCase_ ) snake_case_ = tokenizer.tokenize(UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ) , 1 ) self.assertEqual(len(UpperCAmelCase_ ) , 1 ) self.assertEqual(token_a[0] , UpperCAmelCase_ ) self.assertEqual(token_a[0] , UpperCAmelCase_ ) @require_tokenizers def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # a special token for Canine can be defined as follows: snake_case_ = 0XE006 snake_case_ = chr(UpperCAmelCase_ ) snake_case_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(UpperCAmelCase_ ) tokenizer.from_pretrained(UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->List[Any]: """simple docstring""" snake_case_ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: snake_case_ = json.load(UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: snake_case_ = json.load(UpperCAmelCase_ ) # a special token for Canine can be defined as follows: snake_case_ = 0XE006 snake_case_ = chr(UpperCAmelCase_ ) snake_case_ = [new_token_a] snake_case_ = [new_token_a] with open(os.path.join(UpperCAmelCase_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files snake_case_ = tokenizer_class.from_pretrained(UpperCAmelCase_ , extra_ids=0 ) self.assertIn(UpperCAmelCase_ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) snake_case_ = 0XE007 snake_case_ = chr(UpperCAmelCase_ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained snake_case_ = [AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ )] snake_case_ = tokenizer_class.from_pretrained( UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , extra_ids=0 ) self.assertIn(UpperCAmelCase_ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowerCAmelCase ( self : Dict ) ->Optional[int]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = """hello world""" if self.space_between_special_tokens: snake_case_ = """[CLS] hello world [SEP]""" else: snake_case_ = input snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(UpperCAmelCase_ , [output, output.lower()] ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] snake_case_ = """a""" snake_case_ = ord(UpperCAmelCase_ ) for attr in attributes_list: setattr(UpperCAmelCase_ , attr + """_id""" , UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_ , attr + """_id""" ) , UpperCAmelCase_ ) setattr(UpperCAmelCase_ , attr + """_id""" , UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_ , attr + """_id""" ) , UpperCAmelCase_ ) setattr(UpperCAmelCase_ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(UpperCAmelCase_ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(UpperCAmelCase_ , """additional_special_tokens_ids""" ) , [] ) snake_case_ = 0XE006 snake_case_ = chr(UpperCAmelCase_ ) setattr(UpperCAmelCase_ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(UpperCAmelCase_ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(UpperCAmelCase_ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowerCAmelCase ( self : int ) ->Dict: """simple docstring""" pass def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" pass def lowerCAmelCase ( 
self : List[str] ) ->Any: """simple docstring""" pass def lowerCAmelCase ( self : Any ) ->List[Any]: """simple docstring""" pass def lowerCAmelCase ( self : int ) ->int: """simple docstring""" pass def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]: """simple docstring""" pass def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]: """simple docstring""" pass def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" pass
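For context on the id lists hard-coded in these tests: CANINE tokenizes at the character level, mapping each character directly to its Unicode code point and reserving the private-use area (0xE000 and up) for special tokens. A minimal sketch, assuming the google/canine-s checkpoint is reachable:

from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
ids = tokenizer("Life", add_special_tokens=True)["input_ids"]
# [CLS] = 0xE000 = 57344 and [SEP] = 0xE001 = 57345 frame the raw code points of "Life".
print(ids)  # [57344, 76, 105, 102, 101, 57345]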
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = True __lowercase: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = 50_000 snake_case_ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
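The slow integration test above can be reproduced outside the test harness. A minimal sketch, assuming Flax is installed and the junnyu/roformer_chinese_base weights are reachable:

import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
print(logits.shape)  # (1, 6, 50000) for this checkpoint's vocabulary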
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __SCREAMING_SNAKE_CASE : List[Any] = { 'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'], 'tokenization_xlm': ['XLMTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMForMultipleChoice', 'XLMForQuestionAnswering', 'XLMForQuestionAnsweringSimple', 'XLMForSequenceClassification', 'XLMForTokenClassification', 'XLMModel', 'XLMPreTrainedModel', 'XLMWithLMHeadModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Any = [ 'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMForMultipleChoice', 'TFXLMForQuestionAnsweringSimple', 'TFXLMForSequenceClassification', 'TFXLMForTokenClassification', 'TFXLMMainLayer', 'TFXLMModel', 'TFXLMPreTrainedModel', 'TFXLMWithLMHeadModel', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys __SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern snake_case_ , snake_case_ = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case_ = failure[j - 1] continue i += 1 return False def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = [0] snake_case_ = 0 snake_case_ = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case_ = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) __SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12' __SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __SCREAMING_SNAKE_CASE : int = 'ABABX' __SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __SCREAMING_SNAKE_CASE : Any = 'AAAB' __SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy' __SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __SCREAMING_SNAKE_CASE : Any = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
"""simple docstring""" import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Dict: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: List[str] = list_field( default=[] , metadata={ """help""": ( """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version""" """ of all available models""" ) } , ) __lowercase: List[int] = list_field( default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""}) __lowercase: List[int] = list_field( default=[8, 32, 1_28, 5_12] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , ) __lowercase: bool = field( default=snake_case__ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , ) __lowercase: bool = field( default=snake_case__ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , ) __lowercase: bool = field( default=snake_case__ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""}) __lowercase: bool = field(default=snake_case__ , metadata={"""help""": """Use FP16 to accelerate inference."""}) __lowercase: bool = field(default=snake_case__ , metadata={"""help""": """Benchmark training of model"""}) __lowercase: bool = field(default=snake_case__ , metadata={"""help""": """Verbose memory tracing"""}) __lowercase: bool = field( default=snake_case__ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , ) __lowercase: bool = field( default=snake_case__ , metadata={ """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory""" } , ) __lowercase: bool = field(default=snake_case__ , metadata={"""help""": """Trace memory line by line"""}) __lowercase: bool = field(default=snake_case__ , metadata={"""help""": """Save result to a CSV file"""}) __lowercase: bool = field(default=snake_case__ , metadata={"""help""": """Save all print statements in a log file"""}) __lowercase: bool = field(default=snake_case__ , metadata={"""help""": """Whether to print environment information"""}) __lowercase: bool = field( default=snake_case__ , metadata={ """help""": ( """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use""" """ multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled""" """ for debugging / testing and on TPU.""" ) } , ) __lowercase: str = field( default=f'''inference_time_{round(time())}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , ) __lowercase: str = field( default=f'''inference_memory_{round(time())}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , ) __lowercase: str = field( default=f'''train_time_{round(time())}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , ) __lowercase: str = field( default=f'''train_memory_{round(time())}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , ) __lowercase: str = field( default=f'''env_info_{round(time())}.csv''' , metadata={"""help""": """CSV filename used if saving environment information."""} , ) __lowercase: str = field( default=f'''log_{round(time())}.csv''' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , ) __lowercase: int = field(default=3 , metadata={"""help""": """Times an experiment will be run."""}) __lowercase: bool = field( default=snake_case__ , metadata={ """help""": ( """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain""" """ model weights.""" ) } , ) def lowerCAmelCase ( self : int ) ->Tuple: """simple docstring""" warnings.warn( F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , UpperCAmelCase_ , ) def lowerCAmelCase ( self : Tuple ) ->Any: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def lowerCAmelCase ( self : List[Any] ) ->Any: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
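The list_field helper at the top of this file is the standard workaround for giving a dataclass field a list default, since dataclasses reject mutable defaults directly. A self-contained sketch of the same pattern:

from dataclasses import dataclass, field


def list_field(default=None, metadata=None):
    # Mutable defaults must be routed through default_factory.
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class Args:
    batch_sizes: list = list_field(default=[8], metadata={"help": "batch sizes to sweep"})


print(Args().batch_sizes)  # [8]; note all instances share the same default list object, as in the original helper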
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
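Outside the Trainer, the same warm-started BERT2BERT setup needs decoder_start_token_id and pad_token_id set explicitly before generate() will work. A minimal sketch with the same tiny checkpoints (untrained, so the output text is not meaningful):

from transformers import BertTokenizer, EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id

inputs = tokenizer("A short test article.", return_tensors="pt")
summary_ids = model.generate(inputs.input_ids, max_length=16)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))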
"""simple docstring""" import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: snake_case_ = XCLIPTextConfig() # derive patch size from model name snake_case_ = model_name.find("""patch""" ) snake_case_ = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) snake_case_ = XCLIPVisionConfig(patch_size=_SCREAMING_SNAKE_CASE , num_frames=_SCREAMING_SNAKE_CASE ) if "large" in model_name: snake_case_ = 768 snake_case_ = 3_072 snake_case_ = 12 snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 16 snake_case_ = 24 snake_case_ = 768 snake_case_ = 3_072 if model_name == "xclip-large-patch14-16-frames": snake_case_ = 336 snake_case_ = XCLIPConfig.from_text_vision_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if "large" in model_name: snake_case_ = 768 return config def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple: # text encoder if name == "token_embedding.weight": snake_case_ = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": snake_case_ = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: snake_case_ = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: snake_case_ = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: snake_case_ = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: snake_case_ = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): snake_case_ = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: snake_case_ = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: snake_case_ = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": snake_case_ = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": snake_case_ = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): snake_case_ = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: snake_case_ = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: snake_case_ = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: snake_case_ = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: snake_case_ = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: snake_case_ = name.replace("""text_projection""" , """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: snake_case_ = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: snake_case_ = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": snake_case_ = name.replace("""positional""" , """position""" ) if 
name.startswith("""mit.resblocks""" ): snake_case_ = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): snake_case_ = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "attn.in_proj" in key: snake_case_ = key.split(""".""" ) if key.startswith("""visual""" ): snake_case_ = key_split[3] snake_case_ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: snake_case_ = val[ :dim, : ] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[ -dim:, : ] else: snake_case_ = val[ :dim ] snake_case_ = val[ dim : dim * 2 ] snake_case_ = val[ -dim: ] else: if "weight" in key: snake_case_ = val[ :dim, : ] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[ -dim:, : ] else: snake_case_ = val[:dim] snake_case_ = val[ dim : dim * 2 ] snake_case_ = val[-dim:] elif key.startswith("""mit""" ): snake_case_ = key_split[2] snake_case_ = config.vision_config.mit_hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[dim : dim * 2, :] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = key_split[2] snake_case_ = config.text_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[ dim : dim * 2 ] snake_case_ = val[-dim:] else: snake_case_ = rename_key(_SCREAMING_SNAKE_CASE ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: snake_case_ = val.T snake_case_ = val return orig_state_dict def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: if num_frames == 8: snake_case_ = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: snake_case_ = """eating_spaghetti.npy""" elif num_frames == 32: snake_case_ = """eating_spaghetti_32_frames.npy""" snake_case_ = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , ) snake_case_ = np.load(_SCREAMING_SNAKE_CASE ) return list(_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> Tuple: snake_case_ = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), 
"""xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } snake_case_ = model_to_url[model_name] snake_case_ = 8 if "16-frames" in model_name: snake_case_ = 16 elif "shot" in model_name: snake_case_ = 32 snake_case_ = get_xclip_config(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = XCLIPModel(_SCREAMING_SNAKE_CASE ) model.eval() if "drive" in checkpoint_url: snake_case_ = """pytorch_model.bin""" gdown.cached_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , quiet=_SCREAMING_SNAKE_CASE ) snake_case_ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""] else: snake_case_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE )["""model"""] snake_case_ = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = XCLIPModel(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() snake_case_ = 336 if model_name == """xclip-large-patch14-16-frames""" else 224 snake_case_ = VideoMAEImageProcessor(size=_SCREAMING_SNAKE_CASE ) snake_case_ = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ = XCLIPProcessor(image_processor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) snake_case_ = prepare_video(_SCREAMING_SNAKE_CASE ) snake_case_ = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=_SCREAMING_SNAKE_CASE ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): snake_case_ = model(**_SCREAMING_SNAKE_CASE ) # Verify outputs snake_case_ = outputs.logits_per_video snake_case_ = logits_per_video.softmax(dim=1 ) print("""Probs:""" , _SCREAMING_SNAKE_CASE ) # kinetics-400 if model_name == "xclip-base-patch32": snake_case_ = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == 
"xclip-base-patch32-16-frames": snake_case_ = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] ) elif model_name == "xclip-base-patch16": snake_case_ = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": snake_case_ = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] ) elif model_name == "xclip-large-patch14": snake_case_ = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": snake_case_ = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": snake_case_ = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": snake_case_ = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] ) elif model_name == "xclip-large-patch14-kinetics-600": snake_case_ = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": snake_case_ = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": snake_case_ = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": snake_case_ = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": snake_case_ = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": snake_case_ = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": snake_case_ = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": snake_case_ = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": snake_case_ = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": snake_case_ = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] ) else: raise ValueError(f"""Model name {model_name} not supported""" ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""nielsr""" ) processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""nielsr""" ) slow_tokenizer.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""nielsr""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='xclip-base-patch32', type=str, help='Name of the model.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() __SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:]) __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""") __SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : List[Any] = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'AI-Sweden/gpt-sw3-126m': 2_048, 'AI-Sweden/gpt-sw3-350m': 2_048, 'AI-Sweden/gpt-sw3-1.6b': 2_048, 'AI-Sweden/gpt-sw3-6.7b': 2_048, 'AI-Sweden/gpt-sw3-20b': 2_048, } class __A (snake_case__): '''simple docstring''' __lowercase: List[str] = VOCAB_FILES_NAMES __lowercase: Optional[int] = PRETRAINED_VOCAB_FILES_MAP __lowercase: List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: Tuple = ["""input_ids""", """attention_mask"""] def __init__( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->None: """simple docstring""" snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs snake_case_ = kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) snake_case_ = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing snake_case_ = """<|endoftext|>""" if eos_token is None else eos_token snake_case_ = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: snake_case_ = unk_token if pad_token is None else pad_token snake_case_ = eos_token if bos_token is None else bos_token else: snake_case_ = """<pad>""" if pad_token is None else pad_token snake_case_ = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) snake_case_ = do_lower_case snake_case_ = remove_space snake_case_ = keep_accents snake_case_ = vocab_file snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) # Used for whitespace normalization in input texts # fmt : off snake_case_ = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", 
"""„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing snake_case_ = re.compile( F"""[{"".join(map(UpperCAmelCase_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]""" ) def __getstate__( self : str ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Union[str, Any] , UpperCAmelCase_ : int ) ->Dict: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def lowerCAmelCase ( self : List[Any] ) ->int: """simple docstring""" return len(self.sp_model ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str ) ->str: """simple docstring""" snake_case_ = self.non_printing_characters_re.sub("""""" , UpperCAmelCase_ ) # Normalize whitespaces snake_case_ = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization snake_case_ = unicodedata.normalize("""NFC""" , UpperCAmelCase_ ) return text def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = self.preprocess_text(UpperCAmelCase_ ) return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str ) ->int: """simple docstring""" return self.sp_model.PieceToId(UpperCAmelCase_ ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : int ) ->str: """simple docstring""" return self.sp_model.IdToPiece(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str ) ->str: """simple docstring""" return out_string def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[str] ) ->str: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string def lowerCAmelCase ( self : int ) ->Dict[str, int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , 
"""wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,) def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : Union[str, bool] = False ) ->Union[List[int], List[List[int]], "torch.Tensor"]: """simple docstring""" if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = self.preprocess_text(UpperCAmelCase_ ) snake_case_ = self.sp_model.encode(UpperCAmelCase_ ) else: snake_case_ = [self.preprocess_text(UpperCAmelCase_ ) for t in text] snake_case_ = self.sp_model.encode(UpperCAmelCase_ ) if return_tensors is True or return_tensors == "pt": snake_case_ = torch.tensor(UpperCAmelCase_ ) return token_ids def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[int, List[int]] ) ->str: """simple docstring""" return self.sp_model.decode(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : "Conversation" ) ->List[int]: """simple docstring""" snake_case_ = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()] snake_case_ = ( F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(UpperCAmelCase_ ) + F"""{self.bos_token}Bot:""" ) return self.encode(text=UpperCAmelCase_ )
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
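The converter above repeatedly resolves dotted weight keys by walking attributes one segment at a time; a minimal sketch of that pattern with a toy torch module (the module and key below are illustrative, not the real model):

import torch

# Toy stand-in for the HF model; nn.Sequential names its children "0", "1", ...
model = torch.nn.Sequential(torch.nn.Linear(2, 2))

def get_by_dotted_key(root, key):
    # Same walk as the weight-setting helpers above: one getattr per segment.
    pointer = root
    for attribute in key.split("."):
        pointer = getattr(pointer, attribute)
    return pointer

print(get_by_dotted_key(model, "0.weight").shape)  # torch.Size([2, 2])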
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __SCREAMING_SNAKE_CASE : Optional[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class __A (datasets.BuilderConfig): '''simple docstring''' __lowercase: int = 1_00_00 __lowercase: Optional[List[str]] = None __lowercase: Optional[datasets.Features] = None class __A (datasets.ArrowBasedBuilder): '''simple docstring''' __lowercase: Optional[int] = ParquetConfig def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->List[str]: """simple docstring""" if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) snake_case_ = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase_ , (str, list, tuple) ): snake_case_ = data_files if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive snake_case_ = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] snake_case_ = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive snake_case_ = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(UpperCAmelCase_ ): with open(UpperCAmelCase_ , """rb""" ) as f: snake_case_ = datasets.Features.from_arrow_schema(pq.read_schema(UpperCAmelCase_ ) ) break splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={"""files""": files} ) ) return splits def lowerCAmelCase ( self : int , UpperCAmelCase_ : pa.Table ) ->pa.Table: """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example snake_case_ = table_cast(UpperCAmelCase_ , self.info.features.arrow_schema ) return pa_table def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ): with open(UpperCAmelCase_ , """rb""" ) as f: snake_case_ = pq.ParquetFile(UpperCAmelCase_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): snake_case_ = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in 
range(pa_table.num_rows))) yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCAmelCase_ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise
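A self-contained sketch of the batched read pattern the generator method above relies on (the file name is a placeholder; a tiny file is written first so the example runs on its own):

import pyarrow as pa
import pyarrow.parquet as pq

# Write a tiny Parquet file so the example is self-contained.
pq.write_table(pa.table({"a": [1, 2, 3, 4]}), "example.parquet")

parquet_file = pq.ParquetFile("example.parquet")
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=2)):
    # Each record batch is wrapped back into a Table, as the builder above does.
    pa_table = pa.Table.from_batches([record_batch])
    print(batch_idx, pa_table.num_rows)  # 0 2, then 1 2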
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = rotary_dim snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = None snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = FlaxGPTJModelTester(self ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @tooslow def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = False snake_case_ = model.config.eos_token_id snake_case_ = jax.jit(model.generate ) snake_case_ = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case_ = 
tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @is_pt_flax_cross_test def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ ) snake_case_ = fx_state with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ ) snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ 
).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
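The cache tests above construct position ids by broadcasting an arange over the batch dimension; a minimal sketch with toy shapes (batch size and sequence length are illustrative):

import jax.numpy as jnp

batch_size, seq_len = 2, 5
input_ids = jnp.ones((batch_size, seq_len), dtype="i4")

# Positions 0..seq_len-2 for the prefix pass, repeated for every batch row,
# mirroring the jnp.broadcast_to call in the tests above.
position_ids = jnp.broadcast_to(
    jnp.arange(input_ids.shape[-1] - 1)[None, :],
    (input_ids.shape[0], input_ids.shape[-1] - 1),
)
print(position_ids.shape)  # (2, 4)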
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : Dict=125 , UpperCAmelCase_ : Dict=None , **UpperCAmelCase_ : Optional[Any] , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the""" """ extra_ids tokens""" ) snake_case_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else pad_token snake_case_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else eos_token snake_case_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = extra_ids snake_case_ = 2**8 # utf is 8 bits # define special tokens dict snake_case_ = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } snake_case_ = len(self.special_tokens_encoder ) snake_case_ = len(UpperCAmelCase_ ) for i, token in enumerate(UpperCAmelCase_ ): snake_case_ = self.vocab_size + i - n snake_case_ = {v: k for k, v in self.special_tokens_encoder.items()} @property def lowerCAmelCase ( self : Any ) ->Tuple: """simple docstring""" return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def lowerCAmelCase ( self : int , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : int , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : str ) ->List[str]: """simple docstring""" snake_case_ = [chr(i ) for i in text.encode("""utf-8""" )] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Union[str, Any] ) ->str: """simple docstring""" if token in self.special_tokens_encoder: snake_case_ = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: snake_case_ = self.added_tokens_encoder[token] elif len(UpperCAmelCase_ ) != 1: snake_case_ = self.unk_token_id else: snake_case_ = ord(UpperCAmelCase_ ) + self._num_special_tokens return token_id def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) ->int: """simple docstring""" if index in self.special_tokens_decoder: snake_case_ = self.special_tokens_decoder[index] else: snake_case_ = chr(index - self._num_special_tokens ) return token def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = B"""""" for token in tokens: if token in self.special_tokens_decoder: snake_case_ = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.added_tokens_decoder: snake_case_ = self.added_tokens_decoder[token].encode("""utf-8""" ) elif token in self.special_tokens_encoder: snake_case_ = token.encode("""utf-8""" ) elif token in self.added_tokens_encoder: snake_case_ = token.encode("""utf-8""" ) else: snake_case_ = bytes([ord(UpperCAmelCase_ )] ) bstring += tok_string snake_case_ = bstring.decode("""utf-8""" , errors="""ignore""" ) return string def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" return ()
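At its core the byte tokenizer above is UTF-8 bytes shifted past the special-token ids; a self-contained sketch that collapses its two steps (byte -> char token, token -> id) into one (the offset of 3 matches the pad/eos/unk table above):

NUM_SPECIAL_TOKENS = 3  # pad=0, eos=1, unk=2, as in the tokenizer above

def byte_encode(text: str) -> list[int]:
    # One id per UTF-8 byte, shifted past the special-token ids.
    return [b + NUM_SPECIAL_TOKENS for b in text.encode("utf-8")]

def byte_decode(ids: list[int]) -> str:
    return bytes(i - NUM_SPECIAL_TOKENS for i in ids).decode("utf-8", errors="ignore")

ids = byte_encode("héllo")
print(ids)               # [107, 198, 172, 111, 111, 114]
print(byte_decode(ids))  # héllo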
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __SCREAMING_SNAKE_CASE : List[Any] = random.Random() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Dict: if rng is None: snake_case_ = global_rng snake_case_ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __A (unittest.TestCase): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : Tuple=400 , UpperCAmelCase_ : Tuple=2_000 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : str=160 , UpperCAmelCase_ : List[str]=8 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : int=4_000 , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=True , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = min_seq_length snake_case_ = max_seq_length snake_case_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) snake_case_ = padding_value snake_case_ = sampling_rate snake_case_ = return_attention_mask snake_case_ = do_normalize snake_case_ = feature_size snake_case_ = chunk_length snake_case_ = hop_length def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Dict=False ) ->Optional[Any]: """simple docstring""" def _flatten(UpperCAmelCase_ : Tuple ): return list(itertools.chain(*UpperCAmelCase_ ) ) if equal_length: snake_case_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size snake_case_ = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: snake_case_ = [np.asarray(UpperCAmelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Optional[int] = WhisperFeatureExtractor if is_speech_available() else None def lowerCAmelCase ( self : str ) ->List[Any]: """simple docstring""" snake_case_ = WhisperFeatureExtractionTester(self ) def lowerCAmelCase ( self : Dict ) ->List[Any]: """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = feat_extract_first.save_pretrained(UpperCAmelCase_ )[0] check_json_file_has_correct_format(UpperCAmelCase_ ) snake_case_ = self.feature_extraction_class.from_pretrained(UpperCAmelCase_ ) snake_case_ = 
feat_extract_first.to_dict() snake_case_ = feat_extract_second.to_dict() snake_case_ = feat_extract_first.mel_filters snake_case_ = feat_extract_second.mel_filters self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->int: """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = os.path.join(UpperCAmelCase_ , """feat_extract.json""" ) feat_extract_first.to_json_file(UpperCAmelCase_ ) snake_case_ = self.feature_extraction_class.from_json_file(UpperCAmelCase_ ) snake_case_ = feat_extract_first.to_dict() snake_case_ = feat_extract_second.to_dict() snake_case_ = feat_extract_first.mel_filters snake_case_ = feat_extract_second.mel_filters self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Tuple ) ->Union[str, Any]: """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 snake_case_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] snake_case_ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs] # Test feature size snake_case_ = feature_extractor(UpperCAmelCase_ , padding="""max_length""" , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input snake_case_ = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features snake_case_ = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) ) # Test batched snake_case_ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features snake_case_ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
snake_case_ = [floats_list((1, x) )[0] for x in (800, 800, 800)] snake_case_ = np.asarray(UpperCAmelCase_ ) snake_case_ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features snake_case_ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) ) # Test truncation required snake_case_ = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] snake_case_ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs] snake_case_ = [x[: feature_extractor.n_samples] for x in speech_inputs] snake_case_ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs_truncated] snake_case_ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features snake_case_ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) ) def lowerCAmelCase ( self : List[Any] ) ->Any: """simple docstring""" import torch snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ = np.random.rand(100 , 32 ).astype(np.floataa ) snake_case_ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: snake_case_ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) snake_case_ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech snake_case_ = ds.sort("""id""" ).select(range(UpperCAmelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCAmelCase ( self : Any ) ->Any: """simple docstring""" snake_case_ = torch.tensor( [ 0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951, 0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678, 0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554, -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854 ] ) # fmt: on snake_case_ = self._load_datasamples(1 ) snake_case_ = WhisperFeatureExtractor() snake_case_ = feature_extractor(UpperCAmelCase_ , return_tensors="""pt""" ).input_features self.assertEqual(input_features.shape , (1, 80, 3_000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , UpperCAmelCase_ , atol=1E-4 ) ) def lowerCAmelCase ( self : int ) ->Optional[Any]: """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ = self._load_datasamples(1 )[0] snake_case_ = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue snake_case_ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=UpperCAmelCase_ )[0] self.assertTrue(np.all(np.mean(UpperCAmelCase_ ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase_ ) - 1 ) < 1E-3 ) )
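The normalization property asserted in the last test is plain per-example standardization; a minimal numpy sketch (array length and scale are arbitrary):

import numpy as np

audio = np.random.rand(4000) * 65535  # arbitrary scale, as in the test above

# Zero-mean unit-variance normalization, the property the test asserts.
normalized = (audio - audio.mean()) / np.sqrt(audio.var() + 1e-7)
print(abs(normalized.mean()) < 1e-3, abs(normalized.var() - 1) < 1e-3)  # True True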
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = """ylacombe/bark-small""" snake_case_ = tempfile.mkdtemp() snake_case_ = """en_speaker_1""" snake_case_ = """This is a test string""" snake_case_ = """speaker_embeddings_path.json""" snake_case_ = """speaker_embeddings""" def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case_ = 35 snake_case_ = 2 snake_case_ = 8 snake_case_ = { """semantic_prompt""": np.ones(UpperCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case_ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string ) snake_case_ = tokenizer( self.input_string , padding="""max_length""" 
, max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
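The voice-preset round trip in the tests above is ordinary numpy npz serialization; the same step in isolation (shapes mirror the test, the file name is a placeholder):

import numpy as np

voice_preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
np.savez("file.npz", **voice_preset)  # keyword args become array names

loaded = np.load("file.npz")
print(sorted(loaded.files))  # ['coarse_prompt', 'fine_prompt', 'semantic_prompt']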
"""simple docstring""" class __A : '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False ) ->None: """simple docstring""" snake_case_ = {} # A node will be a leaf if the tree contains its word snake_case_ = is_leaf snake_case_ = prefix def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str ) ->tuple[str, str, str]: """simple docstring""" snake_case_ = 0 for q, w in zip(self.prefix , UpperCAmelCase_ ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : list[str] ) ->None: """simple docstring""" for word in words: self.insert(UpperCAmelCase_ ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : str ) ->None: """simple docstring""" if self.prefix == word: snake_case_ = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: snake_case_ = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_ ) else: snake_case_ = self.nodes[word[0]] snake_case_ , snake_case_ , snake_case_ = incoming_node.match( UpperCAmelCase_ ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(UpperCAmelCase_ ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: snake_case_ = remaining_prefix snake_case_ = self.nodes[matching_string[0]] snake_case_ = RadixNode(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = aux_node if remaining_word == "": snake_case_ = True else: self.nodes[matching_string[0]].insert(UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str ) ->bool: """simple docstring""" snake_case_ = self.nodes.get(word[0] , UpperCAmelCase_ ) if not incoming_node: return False else: snake_case_ , snake_case_ , snake_case_ = incoming_node.match( UpperCAmelCase_ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(UpperCAmelCase_ ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str ) ->bool: """simple docstring""" snake_case_ = self.nodes.get(word[0] , UpperCAmelCase_ ) if not incoming_node: return False else: snake_case_ , snake_case_ , snake_case_ = incoming_node.match( UpperCAmelCase_ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(UpperCAmelCase_ ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: snake_case_ = list(self.nodes.values() )[0] snake_case_ = merging_node.is_leaf self.prefix += merging_node.prefix snake_case_ = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: snake_case_ = False # If there is 1 edge, we merge it with its child else: snake_case_ = 
list(incoming_node.nodes.values() )[0] snake_case_ = merging_node.is_leaf incoming_node.prefix += merging_node.prefix snake_case_ = merging_node.nodes return True def lowerCAmelCase ( self : str , UpperCAmelCase_ : int = 0 ) ->None: """simple docstring""" if self.prefix != "": print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def _a ( ) -> bool: snake_case_ = """banana bananas bandana band apple all beast""".split() snake_case_ = RadixNode() root.insert_many(_SCREAMING_SNAKE_CASE ) assert all(root.find(_SCREAMING_SNAKE_CASE ) for word in words ) assert not root.find("""bandanas""" ) assert not root.find("""apps""" ) root.delete("""all""" ) assert not root.find("""all""" ) root.delete("""banana""" ) assert not root.find("""banana""" ) assert root.find("""bananas""" ) return True def _a ( ) -> None: assert test_trie() def _a ( ) -> None: snake_case_ = RadixNode() snake_case_ = """banana bananas bandanas bandana band apple all beast""".split() root.insert_many(_SCREAMING_SNAKE_CASE ) print("""Words:""" , _SCREAMING_SNAKE_CASE ) print("""Tree:""" ) root.print_tree() if __name__ == "__main__": main()
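Every case in insert/find/delete above is driven by the three-way split that match() computes; the same logic stand-alone:

def common_split(prefix: str, word: str) -> tuple[str, str, str]:
    # (matching string, leftover prefix, leftover word) -- the match() contract above.
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]

print(common_split("banana", "bandana"))  # ('ban', 'ana', 'dana')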
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
347
1
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __SCREAMING_SNAKE_CASE : List[str] = pd.read_csv( 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/' 'position_salaries.csv' ) __SCREAMING_SNAKE_CASE : List[Any] = dataset.iloc[:, 1:2].values __SCREAMING_SNAKE_CASE : Union[str, Any] = dataset.iloc[:, 2].values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = train_test_split(X, y, test_size=0.2, random_state=0) __SCREAMING_SNAKE_CASE : List[str] = PolynomialFeatures(degree=4) __SCREAMING_SNAKE_CASE : List[str] = poly_reg.fit_transform(X) __SCREAMING_SNAKE_CASE : Dict = LinearRegression() pol_reg.fit(X_poly, y) def _a ( ) -> Union[str, Any]: plt.scatter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color="""red""" ) plt.plot(_SCREAMING_SNAKE_CASE , pol_reg.predict(poly_reg.fit_transform(_SCREAMING_SNAKE_CASE ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
347
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
347
1
"""simple docstring""" from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _a ( _SCREAMING_SNAKE_CASE ) -> None: snake_case_ , snake_case_ = analyze_text(_SCREAMING_SNAKE_CASE ) snake_case_ = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. snake_case_ = sum(single_char_strings.values() ) # one length string snake_case_ = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: snake_case_ = single_char_strings[ch] snake_case_ = my_str / all_sum my_fir_sum += prob * math.loga(_SCREAMING_SNAKE_CASE ) # entropy formula. # print entropy print(f"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string snake_case_ = sum(two_char_strings.values() ) snake_case_ = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: snake_case_ = cha + cha if sequence in two_char_strings: snake_case_ = two_char_strings[sequence] snake_case_ = int(_SCREAMING_SNAKE_CASE ) / all_sum my_sec_sum += prob * math.loga(_SCREAMING_SNAKE_CASE ) # print second entropy print(f"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def _a ( _SCREAMING_SNAKE_CASE ) -> tuple[dict, dict]: snake_case_ = Counter() # type: ignore snake_case_ = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _a ( ) -> Optional[int]: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
347
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
347
1
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _a ( ) -> Dict: snake_case_ = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert("""RGB""" ) return image def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: snake_case_ = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") ) # fmt: 
on return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases snake_case_ = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" ) snake_case_ = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict snake_case_ = torch.cat((q_bias, torch.zeros_like(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE ), v_bias) ) snake_case_ = qkv_bias def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 364 if """coco""" in model_name else 224 snake_case_ = InstructBlipVisionConfig(image_size=_SCREAMING_SNAKE_CASE ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: snake_case_ = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: snake_case_ = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: snake_case_ = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: snake_case_ = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=32_001 ).to_dict() else: raise ValueError("""Model name not supported""" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 snake_case_ = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() snake_case_ = InstructBlipConfig(vision_config=_SCREAMING_SNAKE_CASE , text_config=_SCREAMING_SNAKE_CASE , qformer_config=_SCREAMING_SNAKE_CASE ) return config, image_size @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> Tuple: snake_case_ = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" ) qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} ) if "t5" in model_name: snake_case_ = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) snake_case_ = LlamaTokenizerFast.from_pretrained( """huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" ) tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} ) snake_case_ , snake_case_ = get_blipa_config(_SCREAMING_SNAKE_CASE ) snake_case_ = InstructBlipForConditionalGeneration(_SCREAMING_SNAKE_CASE ).eval() snake_case_ = { """instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""), """instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""), """instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""), """instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""), } snake_case_ , snake_case_ = 
model_name_to_original[model_name] # load original model print("""Loading original model...""" ) snake_case_ = """cuda:1""" if torch.cuda.is_available() else """cpu""" snake_case_ = """cuda:2""" if torch.cuda.is_available() else """cpu""" snake_case_ , snake_case_ , snake_case_ = load_model_and_preprocess( name=_SCREAMING_SNAKE_CASE , model_type=_SCREAMING_SNAKE_CASE , is_eval=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) original_model.eval() print("""Done!""" ) # update state dict keys snake_case_ = original_model.state_dict() snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): snake_case_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) if key.startswith("""Qformer.bert""" ): snake_case_ = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: snake_case_ = key.replace("""self""" , """attention""" ) if "llm_proj" in key: snake_case_ = key.replace("""llm_proj""" , """language_projection""" ) if "t5_proj" in key: snake_case_ = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""llm_model""" ): snake_case_ = key.replace("""llm_model""" , """language_model""" ) if key.startswith("""t5""" ): snake_case_ = key.replace("""t5""" , """language""" ) snake_case_ = val # read in qv biases read_in_q_v_bias(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) snake_case_ = load_demo_image() snake_case_ = """What is unusual about this image?""" # create processor snake_case_ = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=_SCREAMING_SNAKE_CASE , image_std=_SCREAMING_SNAKE_CASE ) snake_case_ = InstructBlipProcessor( image_processor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE , ) snake_case_ = processor(images=_SCREAMING_SNAKE_CASE , text=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) # make sure processor creates exact same pixel values snake_case_ = vis_processors["""eval"""](_SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(_SCREAMING_SNAKE_CASE ) snake_case_ = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , _SCREAMING_SNAKE_CASE ) original_model.to(_SCREAMING_SNAKE_CASE ) hf_model.to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): if "vicuna" in model_name: snake_case_ = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits snake_case_ = hf_model(**_SCREAMING_SNAKE_CASE ).logits else: snake_case_ = original_model( {"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits snake_case_ = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(_SCREAMING_SNAKE_CASE ) snake_case_ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) snake_case_ = hf_model(**_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ).logits print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape snake_case_ = 1E-4 if """vicuna""" in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , _SCREAMING_SNAKE_CASE , 
atol=_SCREAMING_SNAKE_CASE ) print("""Looks ok!""" ) print("""Generating with original model...""" ) snake_case_ = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("""Generating with HF model...""" ) snake_case_ = hf_model.generate( **_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? snake_case_ = 2 print("""Original generation:""" , _SCREAMING_SNAKE_CASE ) snake_case_ = processor.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) snake_case_ = [text.strip() for text in output_text] print("""HF generation:""" , _SCREAMING_SNAKE_CASE ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_SCREAMING_SNAKE_CASE ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: processor.push_to_hub(f"""Salesforce/{model_name}""" ) hf_model.push_to_hub(f"""Salesforce/{model_name}""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() __SCREAMING_SNAKE_CASE : Union[str, Any] = [ 'instructblip-vicuna-7b', 'instructblip-vicuna-13b', 'instructblip-flan-t5-xl', 'instructblip-flan-t5-xxl', ] parser.add_argument( '--model_name', default='instructblip-flan-t5-xl', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
347
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = 
os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
347
1
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def _a ( _SCREAMING_SNAKE_CASE="ro" , _SCREAMING_SNAKE_CASE="en" , _SCREAMING_SNAKE_CASE="wmt16" , _SCREAMING_SNAKE_CASE=None ) -> None: try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError("""run pip install datasets""" ) snake_case_ = f"""{src_lang}-{tgt_lang}""" print(f"""Converting {dataset}-{pair}""" ) snake_case_ = datasets.load_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if save_dir is None: snake_case_ = f"""{dataset}-{pair}""" snake_case_ = Path(_SCREAMING_SNAKE_CASE ) save_dir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) for split in ds.keys(): print(f"""Splitting {split} with {ds[split].num_rows} records""" ) # to save to val.source, val.target like summary datasets snake_case_ = """val""" if split == """validation""" else split snake_case_ = save_dir.joinpath(f"""{fn}.source""" ) snake_case_ = save_dir.joinpath(f"""{fn}.target""" ) snake_case_ = src_path.open("""w+""" ) snake_case_ = tgt_path.open("""w+""" ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): snake_case_ = x["""translation"""] src_fp.write(ex[src_lang] + """\n""" ) tgt_fp.write(ex[tgt_lang] + """\n""" ) print(f"""Saved {dataset} dataset to {save_dir}""" ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
347
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int: snake_case_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
347
1
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = True __lowercase: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = 50_000 snake_case_ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
347
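For context, a minimal sketch of what the slow integration test above exercises, outside the test harness; the checkpoint id is the one named in the test, and flax must be installed:

import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
print(logits.shape)  # (1, 6, 50000), matching the expected shape asserted in the test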
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
1
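A tiny worked run of the routine above on a hypothetical three-edge graph: after the V - 1 relaxation passes, the two-hop path 0 -> 1 -> 2 (cost 7) beats the direct 0 -> 2 edge (cost 10).

graph = [
    {"src": 0, "dst": 1, "weight": 5},
    {"src": 0, "dst": 2, "weight": 10},
    {"src": 1, "dst": 2, "weight": 2},
]
print(bellman_ford(graph, vertex_count=3, edge_count=3, src=0))  # [0.0, 5.0, 7.0]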
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """roformer""" def __init__( self : List[Any] , UpperCAmelCase_ : Tuple=50_000 , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : Dict=12 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : str=3_072 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[Any]=1_536 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : Any=1E-12 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : int=True , **UpperCAmelCase_ : List[Any] , ) ->Tuple: """simple docstring""" super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size if embedding_size is None else embedding_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = rotary_value snake_case_ = use_cache class __A (snake_case__): '''simple docstring''' @property def lowerCAmelCase ( self : Tuple ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": snake_case_ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case_ = {0: """batch""", 1: """sequence"""} snake_case_ = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
347
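A short sketch of the configuration in use; the class names are the public transformers API, the default shown mirrors the __init__ above, and RoFormerModel additionally requires torch:

from transformers import RoFormerConfig, RoFormerModel

config = RoFormerConfig(rotary_value=True)  # non-default: rotate the value projections too
model = RoFormerModel(config)
print(config.max_position_embeddings)       # 1536, the default set in __init__ above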
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = 
num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
347
1
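To make the steps-per-epoch arithmetic in main() concrete, here is the same computation with hypothetical numbers (a v3-8 TPU exposes 8 replicas):

per_replica_batch_size = 8       # the script's default
num_replicas_in_sync = 8         # hypothetical: a single v3-8 TPU
num_train_samples = 1_000_000    # hypothetical dataset size
num_epochs = 1

global_batch_size = per_replica_batch_size * num_replicas_in_sync  # 64
steps_per_epoch = num_train_samples // global_batch_size           # 15625
total_train_steps = steps_per_epoch * num_epochs                   # 15625
print(global_batch_size, steps_per_epoch, total_train_steps)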
"""simple docstring""" import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py __SCREAMING_SNAKE_CASE : Any = 'src/transformers' __SCREAMING_SNAKE_CASE : Optional[int] = 'docs/source/en' __SCREAMING_SNAKE_CASE : Any = '.' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ = f.readlines() # Find the start prompt. snake_case_ = 0 while not lines[start_index].startswith(_SCREAMING_SNAKE_CASE ): start_index += 1 start_index += 1 snake_case_ = start_index while not lines[end_index].startswith(_SCREAMING_SNAKE_CASE ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | __SCREAMING_SNAKE_CASE : Optional[Any] = 'Model|Encoder|Decoder|ForConditionalGeneration' # Regexes that match TF/Flax/PT model names. __SCREAMING_SNAKE_CASE : Any = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __SCREAMING_SNAKE_CASE : int = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # This is to make sure the transformers module imported is the one in the repo. __SCREAMING_SNAKE_CASE : int = direct_transformers_import(TRANSFORMERS_PATH) def _a ( _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , _SCREAMING_SNAKE_CASE ) return [m.group(0 ) for m in matches] def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = 2 if text == """✅""" or text == """❌""" else len(_SCREAMING_SNAKE_CASE ) snake_case_ = (width - text_length) // 2 snake_case_ = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def _a ( ) -> Dict: snake_case_ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES snake_case_ = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } snake_case_ = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. snake_case_ = collections.defaultdict(_SCREAMING_SNAKE_CASE ) snake_case_ = collections.defaultdict(_SCREAMING_SNAKE_CASE ) snake_case_ = collections.defaultdict(_SCREAMING_SNAKE_CASE ) snake_case_ = collections.defaultdict(_SCREAMING_SNAKE_CASE ) snake_case_ = collections.defaultdict(_SCREAMING_SNAKE_CASE ) # Let's lookup through all transformers object (once). 
for attr_name in dir(_SCREAMING_SNAKE_CASE ): snake_case_ = None if attr_name.endswith("""Tokenizer""" ): snake_case_ = slow_tokenizers snake_case_ = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): snake_case_ = fast_tokenizers snake_case_ = attr_name[:-13] elif _re_tf_models.match(_SCREAMING_SNAKE_CASE ) is not None: snake_case_ = tf_models snake_case_ = _re_tf_models.match(_SCREAMING_SNAKE_CASE ).groups()[0] elif _re_flax_models.match(_SCREAMING_SNAKE_CASE ) is not None: snake_case_ = flax_models snake_case_ = _re_flax_models.match(_SCREAMING_SNAKE_CASE ).groups()[0] elif _re_pt_models.match(_SCREAMING_SNAKE_CASE ) is not None: snake_case_ = pt_models snake_case_ = _re_pt_models.match(_SCREAMING_SNAKE_CASE ).groups()[0] if lookup_dict is not None: while len(_SCREAMING_SNAKE_CASE ) > 0: if attr_name in model_name_to_prefix.values(): snake_case_ = True break # Try again after removing the last word in the name snake_case_ = """""".join(camel_case_split(_SCREAMING_SNAKE_CASE )[:-1] ) # Let's build that table! snake_case_ = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) snake_case_ = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). snake_case_ = [len(_SCREAMING_SNAKE_CASE ) + 2 for c in columns] snake_case_ = max([len(_SCREAMING_SNAKE_CASE ) for name in model_names] ) + 2 # Build the table per se snake_case_ = """|""" + """|""".join([_center_text(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for c, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" snake_case_ = {True: """✅""", False: """❌"""} for name in model_names: snake_case_ = model_name_to_prefix[name] snake_case_ = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for l, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] ) + "|\n" return table def _a ( _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: snake_case_ , snake_case_ , snake_case_ , snake_case_ = _find_text_in_file( filename=os.path.join(_SCREAMING_SNAKE_CASE , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) snake_case_ = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(_SCREAMING_SNAKE_CASE , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() check_model_table(args.fix_and_overwrite)
347
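To see what the camel-case splitter above actually produces, here it is restated standalone with the regex copied verbatim (the name camel_case_split follows the original, de-obfuscated source):

import re

def camel_case_split(identifier: str) -> list:
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

print(camel_case_split("FlaxRoFormerForMaskedLM"))
# ['Flax', 'Ro', 'Former', 'For', 'Masked', 'LM']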
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float: if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
347
1
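For a concrete check of the Newton-Laplace formula above, with assumed textbook values for water (bulk modulus about 2.15 GPa, density about 1000 kg/m^3):

print(speed_of_sound_in_a_fluid(density=1_000, bulk_modulus=2.15e9))  # ~1466.3 m/s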
"""simple docstring""" import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class __A (snake_case__): '''simple docstring''' __lowercase: List[str] = """""" __lowercase: str = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) __lowercase: str = None # compression type in fsspec. ex: "gzip" __lowercase: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : int , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[dict] = None , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" super().__init__(self , **UpperCAmelCase_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case_ = fsspec.open( UpperCAmelCase_ , mode="""rb""" , protocol=UpperCAmelCase_ , compression=self.compression , client_kwargs={ """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459 """trust_env""": True, # Enable reading proxy env variables. **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) snake_case_ = os.path.basename(self.file.path.split("""::""" )[0] ) snake_case_ = ( self.compressed_name[: self.compressed_name.rindex(""".""" )] if """.""" in self.compressed_name else self.compressed_name ) snake_case_ = None @classmethod def lowerCAmelCase ( cls : Optional[Any] , UpperCAmelCase_ : Tuple ) ->str: """simple docstring""" return super()._strip_protocol(UpperCAmelCase_ ).lstrip("""/""" ) def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]: """simple docstring""" if self.dir_cache is None: snake_case_ = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name} snake_case_ = {f["""name"""]: f} def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return self.file.open().read() def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : str = "rb" , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ) ->Union[str, Any]: """simple docstring""" snake_case_ = self._strip_protocol(UpperCAmelCase_ ) if mode != "rb": raise ValueError(F"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" ) return self.file.open() class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = """bz2""" __lowercase: str = """bz2""" __lowercase: str = """.bz2""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """gzip""" __lowercase: Tuple = """gzip""" __lowercase: List[str] = """.gz""" class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = """lz4""" __lowercase: Union[str, Any] = """lz4""" __lowercase: Dict = """.lz4""" class __A (snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = """xz""" __lowercase: List[Any] = """xz""" __lowercase: Tuple = """.xz""" class __A (snake_case__): '''simple docstring''' __lowercase: Tuple = """zstd""" __lowercase: Dict = """zstd""" __lowercase: str = """.zst""" def __init__( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str = "rb" , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[dict] = None , UpperCAmelCase_ : int = DEFAULT_BLOCK_SIZE , **UpperCAmelCase_ 
: Optional[int] , ) ->Optional[int]: """simple docstring""" super().__init__( fo=UpperCAmelCase_ , mode=UpperCAmelCase_ , target_protocol=UpperCAmelCase_ , target_options=UpperCAmelCase_ , block_size=UpperCAmelCase_ , **UpperCAmelCase_ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case_ = self.file.__enter__ class __A : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = file_ def __enter__( self : Optional[Any] ) ->int: """simple docstring""" self._file.__enter__() return self def __exit__( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict ) ->str: """simple docstring""" self._file.__exit__(*UpperCAmelCase_ , **UpperCAmelCase_ ) def __iter__( self : List[str] ) ->Union[str, Any]: """simple docstring""" return iter(self._file ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" return next(self._file ) def __getattr__( self : str , UpperCAmelCase_ : Optional[int] ) ->Tuple: """simple docstring""" return getattr(self._file , UpperCAmelCase_ ) def fixed_enter(*UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ): return WrappedFile(_enter(*UpperCAmelCase_ , **UpperCAmelCase_ ) ) snake_case_ = fixed_enter
347
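A sketch of how these filesystems are meant to be used through fsspec URL chaining, mirroring the gzip example in the class comment above; it assumes the classes have been registered with fsspec (the datasets package does this on import), and the URL is a placeholder:

import fsspec
import datasets  # noqa: F401 -- importing datasets registers the compression filesystems

with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz", mode="rb") as f:
    data = f.read()  # transparently decompressed; only "rb" mode is supported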
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> bool: if num < 0: return False snake_case_ = num snake_case_ = 0 while num > 0: snake_case_ = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
1
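A few quick checks of the digit-reversal test above:

assert is_palindrome(121)
assert not is_palindrome(-121)  # negatives are rejected up front
assert not is_palindrome(10)    # a trailing zero cannot survive reversal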
"""simple docstring""" import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def _a ( *_SCREAMING_SNAKE_CASE ) -> Optional[int]: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = list(_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): snake_case_ = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def _a ( _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def _a ( _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 128 ) -> int: if function is None: return functools.partial(_SCREAMING_SNAKE_CASE , starting_batch_size=_SCREAMING_SNAKE_CASE ) snake_case_ = starting_batch_size def decorator(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() snake_case_ = list(inspect.signature(_SCREAMING_SNAKE_CASE ).parameters.keys() ) # Guard against user error if len(_SCREAMING_SNAKE_CASE ) < (len(_SCREAMING_SNAKE_CASE ) + 1): snake_case_ = """, """.join([f"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( f"""Batch size was passed into `{function.__name__}` as the first argument when called.""" f"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) except Exception as e: if should_reduce_batch_size(_SCREAMING_SNAKE_CASE ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
347
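A sketch of the decorator above in use; the training function body is hypothetical, and note that the batch size argument is injected by the decorator rather than passed by the caller:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # hypothetical loop: if this raises a CUDA OOM, the decorator halves
    # batch_size and retries until a size fits or batch_size reaches zero
    print(f"trying batch_size={batch_size}")

train()  # do not pass batch_size here; the decorator supplies it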
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = SpeechTaTokenizer __lowercase: int = False __lowercase: List[str] = True def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ ) snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) snake_case_ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = """this is a test""" snake_case_ = """this is a test""" return input_text, output_text def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = """<pad>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCAmelCase_ ) , 81 ) def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size + 
len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off snake_case_ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
347
1
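Outside the fixture-based tests, the round-trip the suite checks looks roughly like this (a sketch; the checkpoint id is the one in the integration test, and sentencepiece must be installed):

from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
ids = tokenizer("this is a test").input_ids
print(tokenizer.decode(ids, skip_special_tokens=True))  # "this is a test"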
"""simple docstring""" import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: snake_case_ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) snake_case_ = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _SCREAMING_SNAKE_CASE ) if matches: snake_case_ = float(matches[1] ) snake_case_ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". snake_case_ = 1_001 snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = """huggingface/label-files""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()} snake_case_ = """background""" snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} return config def _a ( ) -> Dict: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]: snake_case_ = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE ) # Load 🤗 model snake_case_ = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor snake_case_ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = model(**_SCREAMING_SNAKE_CASE ) snake_case_ = outputs.logits assert logits.shape == (1, 1_001) if model_name == "mobilenet_v1_1.0_224": snake_case_ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": snake_case_ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: snake_case_ = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("""Pushing to the hub...""" ) snake_case_ = """google/""" + model_name image_processor.push_to_hub(_SCREAMING_SNAKE_CASE ) model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. 
Should be in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __SCREAMING_SNAKE_CASE : Dict = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
347
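Once converted, a checkpoint can be used through the standard transformers API; a sketch, assuming the model was pushed under the google/ prefix used by push_to_hub above:

import requests
import torch
from PIL import Image
from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits  # (1, 1001)
print(model.config.id2label[logits.argmax(-1).item()])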
"""simple docstring""" import datasets __SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' __SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' __SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int: """simple docstring""" return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
347
1
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class __A (tf.keras.layers.Layer): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int = None , UpperCAmelCase_ : int = None ) ->Union[str, Any]: """simple docstring""" super().__init__() snake_case_ = pad_token_id snake_case_ = max_length snake_case_ = vocab snake_case_ = merges snake_case_ = BytePairTokenizer(UpperCAmelCase_ , UpperCAmelCase_ , sequence_length=UpperCAmelCase_ ) @classmethod def lowerCAmelCase ( cls : List[str] , UpperCAmelCase_ : GPTaTokenizer , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple ) ->List[Any]: """simple docstring""" snake_case_ = [""" """.join(UpperCAmelCase_ ) for m in tokenizer.bpe_ranks.keys()] snake_case_ = tokenizer.get_vocab() return cls(UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ ) @classmethod def lowerCAmelCase ( cls : Optional[Any] , UpperCAmelCase_ : Union[str, os.PathLike] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ) ->Any: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ ) return cls.from_tokenizer(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ ) @classmethod def lowerCAmelCase ( cls : str , UpperCAmelCase_ : Tuple ) ->str: """simple docstring""" return cls(**UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def lowerCAmelCase ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int = None ) ->Tuple: """simple docstring""" snake_case_ = self.tf_tokenizer(UpperCAmelCase_ ) snake_case_ = tf.ones_like(UpperCAmelCase_ ) if self.pad_token_id is not None: # pad the tokens up to max length snake_case_ = max_length if max_length is not None else self.max_length if max_length is not None: snake_case_ , snake_case_ = pad_model_inputs( UpperCAmelCase_ , max_seq_length=UpperCAmelCase_ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
347
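A sketch of building the in-graph tokenizer layer from a pretrained checkpoint; the public class name is believed to be TFGPT2Tokenizer (the layer above is its de-obfuscated body), and keras_nlp plus tensorflow_text must be installed:

import tensorflow as tf
from transformers import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
batch = tf_tokenizer(tf.constant(["hello world"]))
print(batch["input_ids"], batch["attention_mask"])  # ragged, since GPT-2 has no pad token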
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
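The classes above are import-time placeholders: they let imports of sentencepiece-backed tokenizers succeed even when the sentencepiece backend is missing, and only fail, with an informative error, once something is actually instantiated via requires_backends. A minimal standalone sketch of the pattern; the error message and the DummyObject/requires_backends internals shown here are an approximation for illustration, not the exact library code:

# Minimal sketch of the dummy-backend pattern (approximation of the real helpers).
def requires_backends(obj, backends):
    name = getattr(obj, "__name__", obj.__class__.__name__)
    # The real helper builds a friendlier message with install instructions.
    raise ImportError(f"{name} requires the {', '.join(backends)} backend(s), which are not installed.")

class DummyObject(type):
    # Metaclass: touching any public attribute on the class itself also fails early.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)

class SomeSentencePieceTokenizer(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)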
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000 ) -> int: return sum(e for e in range(3 , _SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(f"""{solution() = }""")
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None: """simple docstring""" warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
"""simple docstring""" from functools import reduce __SCREAMING_SNAKE_CASE : Union[str, Any] = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def _a ( _SCREAMING_SNAKE_CASE = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str(int(_SCREAMING_SNAKE_CASE ) * int(_SCREAMING_SNAKE_CASE ) ) , n[i : i + 13] ) ) for i in range(len(_SCREAMING_SNAKE_CASE ) - 12 ) ) if __name__ == "__main__": print(f"""{solution() = }""")
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , 
outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
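Once the script has run, the dumped folder is a regular Transformers checkpoint and can be loaded back with `from_pretrained`. A hedged sketch (the folder path is whatever was passed as `--pytorch_dump_folder_path`):

# Loading a converted checkpoint back (sketch; path is illustrative).
from transformers import ViTForImageClassification, ViTImageProcessor

model = ViTForImageClassification.from_pretrained("./vit_base_patch16_224")
image_processor = ViTImageProcessor.from_pretrained("./vit_base_patch16_224")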
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _a ( ) -> Dict: snake_case_ = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert("""RGB""" ) return image def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases snake_case_ = 
state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" ) snake_case_ = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict snake_case_ = torch.cat((q_bias, torch.zeros_like(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE ), v_bias) ) snake_case_ = qkv_bias def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 364 if """coco""" in model_name else 224 snake_case_ = BlipaVisionConfig(image_size=_SCREAMING_SNAKE_CASE ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: snake_case_ = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_SCREAMING_SNAKE_CASE ).to_dict() elif "opt-6.7b" in model_name: snake_case_ = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_SCREAMING_SNAKE_CASE ).to_dict() elif "t5-xl" in model_name: snake_case_ = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: snake_case_ = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() snake_case_ = BlipaConfig(vision_config=_SCREAMING_SNAKE_CASE , text_config=_SCREAMING_SNAKE_CASE ) return config, image_size @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]: snake_case_ = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) snake_case_ = tokenizer("""\n""" , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids[0] snake_case_ , snake_case_ = get_blipa_config(_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE ) snake_case_ = BlipaForConditionalGeneration(_SCREAMING_SNAKE_CASE ).eval() snake_case_ = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } snake_case_ , snake_case_ = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) snake_case_ = """cuda""" if torch.cuda.is_available() else """cpu""" snake_case_ , snake_case_ , snake_case_ = load_model_and_preprocess( name=_SCREAMING_SNAKE_CASE , model_type=_SCREAMING_SNAKE_CASE , is_eval=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) original_model.eval() print("""Done!""" ) # update state dict keys snake_case_ = original_model.state_dict() snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): snake_case_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) if key.startswith("""Qformer.bert""" ): snake_case_ = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: snake_case_ = key.replace("""self""" , """attention""" ) if "opt_proj" in key: snake_case_ = key.replace("""opt_proj""" , """language_projection""" ) if 
"t5_proj" in key: snake_case_ = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): snake_case_ = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): snake_case_ = key.replace("""t5""" , """language""" ) snake_case_ = val # read in qv biases read_in_q_v_bias(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ = hf_model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) assert len(_SCREAMING_SNAKE_CASE ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] snake_case_ = load_demo_image() snake_case_ = vis_processors["""eval"""](_SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(_SCREAMING_SNAKE_CASE ) snake_case_ = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_SCREAMING_SNAKE_CASE ) # create processor snake_case_ = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=_SCREAMING_SNAKE_CASE , image_std=_SCREAMING_SNAKE_CASE ) snake_case_ = BlipaProcessor(image_processor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) snake_case_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values.to(_SCREAMING_SNAKE_CASE ) # make sure processor creates exact same pixel values assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) original_model.to(_SCREAMING_SNAKE_CASE ) hf_model.to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): if "opt" in model_name: snake_case_ = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits snake_case_ = hf_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).logits else: snake_case_ = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits snake_case_ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 ) snake_case_ = hf_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": snake_case_ = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_SCREAMING_SNAKE_CASE ) assert torch.allclose(logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": snake_case_ = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_SCREAMING_SNAKE_CASE ) else: # cast to same type snake_case_ = logits.dtype assert torch.allclose(original_logits.to(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , atol=1E-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) snake_case_ = """""" snake_case_ = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_ids.to(_SCREAMING_SNAKE_CASE ) snake_case_ = original_model.generate({"""image""": original_pixel_values} ) snake_case_ = hf_model.generate( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , _SCREAMING_SNAKE_CASE ) snake_case_ = input_ids.shape[1] snake_case_ = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) snake_case_ = [text.strip() for text in output_text] print("""HF generation:""" , _SCREAMING_SNAKE_CASE ) if 
pytorch_dump_folder_path is not None: processor.save_pretrained(_SCREAMING_SNAKE_CASE ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: processor.push_to_hub(f"""nielsr/{model_name}""" ) hf_model.push_to_hub(f"""nielsr/{model_name}""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() __SCREAMING_SNAKE_CASE : int = [ 'blip2-opt-2.7b', 'blip2-opt-6.7b', 'blip2-opt-2.7b-coco', 'blip2-opt-6.7b-coco', 'blip2-flan-t5-xl', 'blip2-flan-t5-xl-coco', 'blip2-flan-t5-xxl', ] parser.add_argument( '--model_name', default='blip2-opt-2.7b', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
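A checkpoint pushed with `--push_to_hub` can afterwards be consumed through the standard BLIP-2 classes. A hedged sketch (the repo id follows the `nielsr/{model_name}` pattern used above and is illustrative, not guaranteed to exist):

# Consuming a converted BLIP-2 checkpoint (sketch; repo id is illustrative).
from transformers import Blip2ForConditionalGeneration, Blip2Processor

processor = Blip2Processor.from_pretrained("nielsr/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("nielsr/blip2-opt-2.7b")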
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = True __lowercase: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = 50_000 snake_case_ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
"""simple docstring""" import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __A (snake_case__ , snake_case__ , snake_case__): '''simple docstring''' @register_to_config def __init__( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False , ) ->Any: """simple docstring""" super().__init__() snake_case_ = nn.Embedding(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = nn.Embedding(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = False snake_case_ = nn.Dropout(p=UpperCAmelCase_ ) snake_case_ = TaConfig( vocab_size=UpperCAmelCase_ , d_model=UpperCAmelCase_ , num_heads=UpperCAmelCase_ , d_kv=UpperCAmelCase_ , d_ff=UpperCAmelCase_ , dropout_rate=UpperCAmelCase_ , feed_forward_proj=UpperCAmelCase_ , is_decoder=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , ) snake_case_ = nn.ModuleList() for lyr_num in range(UpperCAmelCase_ ): snake_case_ = TaBlock(UpperCAmelCase_ ) self.encoders.append(UpperCAmelCase_ ) snake_case_ = TaLayerNorm(UpperCAmelCase_ ) snake_case_ = nn.Dropout(p=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) ->str: """simple docstring""" snake_case_ = self.token_embedder(UpperCAmelCase_ ) snake_case_ = encoder_input_tokens.shape[1] snake_case_ = torch.arange(UpperCAmelCase_ , device=encoder_input_tokens.device ) x += self.position_encoding(UpperCAmelCase_ ) snake_case_ = self.dropout_pre(UpperCAmelCase_ ) # inverted the attention mask snake_case_ = encoder_input_tokens.size() snake_case_ = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ ) for lyr in self.encoders: snake_case_ = lyr(UpperCAmelCase_ , UpperCAmelCase_ )[0] snake_case_ = self.layer_norm(UpperCAmelCase_ ) return self.dropout_post(UpperCAmelCase_ ), encoder_inputs_mask
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern snake_case_ , snake_case_ = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case_ = failure[j - 1] continue i += 1 return False def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = [0] snake_case_ = 0 snake_case_ = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case_ = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) __SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12' __SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __SCREAMING_SNAKE_CASE : int = 'ABABX' __SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __SCREAMING_SNAKE_CASE : Any = 'AAAB' __SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy' __SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __SCREAMING_SNAKE_CASE : Any = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __A (unittest.TestCase): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Tuple=18 , UpperCAmelCase_ : List[Any]=30 , UpperCAmelCase_ : Dict=400 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Dict=True , ) ->Any: """simple docstring""" snake_case_ = size if size is not None else {"""height""": 18, """width""": 18} snake_case_ = parent snake_case_ = batch_size snake_case_ = num_channels snake_case_ = image_size snake_case_ = min_resolution snake_case_ = max_resolution snake_case_ = do_resize snake_case_ = size snake_case_ = apply_ocr def lowerCAmelCase ( self : Union[str, Any] ) ->Any: """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowerCAmelCase ( self : str ) ->Optional[int]: """simple docstring""" snake_case_ = LayoutLMvaImageProcessingTester(self ) @property def lowerCAmelCase ( self : List[Any] ) ->List[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Tuple ) ->List[Any]: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """apply_ocr""" ) ) def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple: """simple docstring""" snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" pass def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , UpperCAmelCase_ ) self.assertIsInstance(encoding.boxes , UpperCAmelCase_ ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase ( self : List[str] ) ->int: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase ( self : Optional[Any] ) ->str: """simple docstring""" snake_case_ = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case_ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" ) snake_case_ = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case_ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", 
"""Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 
824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCAmelCase_ ) self.assertListEqual(encoding.boxes , UpperCAmelCase_ ) # with apply_OCR = False snake_case_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_ ) snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
"""simple docstring""" __SCREAMING_SNAKE_CASE : str = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: # Return True if there is node that has not iterated. snake_case_ = [False] * len(_SCREAMING_SNAKE_CASE ) snake_case_ = [s] snake_case_ = True while queue: snake_case_ = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_SCREAMING_SNAKE_CASE ) snake_case_ = True snake_case_ = u return visited[t] def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: snake_case_ = [-1] * (len(_SCREAMING_SNAKE_CASE )) snake_case_ = 0 snake_case_ = [] snake_case_ = [i[:] for i in graph] # Record original cut, copy. while bfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = float("""Inf""" ) snake_case_ = sink while s != source: # Find the minimum value in select path snake_case_ = min(_SCREAMING_SNAKE_CASE , graph[parent[s]][s] ) snake_case_ = parent[s] max_flow += path_flow snake_case_ = sink while v != source: snake_case_ = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow snake_case_ = parent[v] for i in range(len(_SCREAMING_SNAKE_CASE ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() __SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:]) __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""") __SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json', # See all Nat models at https://huggingface.co/models?filter=nat } class __A (snake_case__ , snake_case__): '''simple docstring''' __lowercase: Optional[int] = """nat""" __lowercase: Dict = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[str] , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Dict=64 , UpperCAmelCase_ : Tuple=[3, 4, 6, 5] , UpperCAmelCase_ : Optional[Any]=[2, 4, 8, 16] , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Optional[Any]=3.0 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : List[Any]=1E-5 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : Optional[int] , ) ->Dict: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = patch_size snake_case_ = num_channels snake_case_ = embed_dim snake_case_ = depths snake_case_ = len(UpperCAmelCase_ ) snake_case_ = num_heads snake_case_ = kernel_size snake_case_ = mlp_ratio snake_case_ = qkv_bias snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = drop_path_rate snake_case_ = hidden_act snake_case_ = layer_norm_eps snake_case_ = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case_ = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) ) snake_case_ = layer_scale_init_value snake_case_ = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )] snake_case_ , snake_case_ = get_aligned_output_features_output_indices( out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
347
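A brief usage sketch for the configuration above, assuming a transformers release that ships the Nat model; it shows how hidden_size is derived from embed_dim and the number of stages.

from transformers import NatConfig  # assumes Nat is available in the installed transformers

config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
# hidden_size = embed_dim * 2 ** (len(depths) - 1) = 64 * 8
print(config.hidden_size)  # 512
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']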
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
347
1
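Driving the converter above directly from Python looks roughly like this; the paths are placeholders, fairseq must be installed, and the de-obfuscated function name is the one used in the file's own __main__ block.

# Hypothetical invocation; positional arguments are checkpoint_path,
# pytorch_dump_folder_path, config_path, dict_path, is_finetuned
# (is_seq_class defaults to False).
convert_wavaveca_checkpoint(
    "./wav2vec_small.pt",   # placeholder fairseq checkpoint
    "./wav2vec2-base",      # placeholder output directory
    None,                   # no explicit HF config: defaults are used
    None,                   # no fairseq dictionary (pre-training checkpoint)
    False,                  # not fine-tuned
)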
"""simple docstring""" from __future__ import annotations import math import random from typing import Any class __A : '''simple docstring''' def __init__( self : Optional[int] ) ->None: """simple docstring""" snake_case_ = [] snake_case_ = 0 snake_case_ = 0 def lowerCAmelCase ( self : List[str] ) ->bool: """simple docstring""" return self.head == self.tail def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any ) ->None: """simple docstring""" self.data.append(UpperCAmelCase_ ) snake_case_ = self.tail + 1 def lowerCAmelCase ( self : Optional[int] ) ->Any: """simple docstring""" snake_case_ = self.data[self.head] snake_case_ = self.head + 1 return ret def lowerCAmelCase ( self : int ) ->int: """simple docstring""" return self.tail - self.head def lowerCAmelCase ( self : str ) ->None: """simple docstring""" print(self.data ) print("""**************""" ) print(self.data[self.head : self.tail] ) class __A : '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase_ : Any ) ->None: """simple docstring""" snake_case_ = data snake_case_ = None snake_case_ = None snake_case_ = 1 def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" return self.data def lowerCAmelCase ( self : Tuple ) ->MyNode | None: """simple docstring""" return self.left def lowerCAmelCase ( self : List[Any] ) ->MyNode | None: """simple docstring""" return self.right def lowerCAmelCase ( self : Union[str, Any] ) ->int: """simple docstring""" return self.height def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->None: """simple docstring""" snake_case_ = data def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : MyNode | None ) ->None: """simple docstring""" snake_case_ = node def lowerCAmelCase ( self : str , UpperCAmelCase_ : MyNode | None ) ->None: """simple docstring""" snake_case_ = node def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : int ) ->None: """simple docstring""" snake_case_ = height def _a ( _SCREAMING_SNAKE_CASE ) -> int: if node is None: return 0 return node.get_height() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: if a > b: return a return b def _a ( _SCREAMING_SNAKE_CASE ) -> MyNode: print("""left rotation node:""" , node.get_data() ) snake_case_ = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(_SCREAMING_SNAKE_CASE ) snake_case_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_SCREAMING_SNAKE_CASE ) snake_case_ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(_SCREAMING_SNAKE_CASE ) return ret def _a ( _SCREAMING_SNAKE_CASE ) -> MyNode: print("""right rotation node:""" , node.get_data() ) snake_case_ = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(_SCREAMING_SNAKE_CASE ) snake_case_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_SCREAMING_SNAKE_CASE ) snake_case_ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(_SCREAMING_SNAKE_CASE ) return ret def _a ( _SCREAMING_SNAKE_CASE ) -> MyNode: snake_case_ = node.get_left() assert left_child is not None node.set_left(left_rotation(_SCREAMING_SNAKE_CASE ) ) return right_rotation(_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE ) -> MyNode: snake_case_ = node.get_right() assert right_child is not None node.set_right(right_rotation(_SCREAMING_SNAKE_CASE ) ) return left_rotation(_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) -> MyNode | None: if node is None: return MyNode(_SCREAMING_SNAKE_CASE ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , _SCREAMING_SNAKE_CASE ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected snake_case_ = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child snake_case_ = right_rotation(_SCREAMING_SNAKE_CASE ) else: snake_case_ = lr_rotation(_SCREAMING_SNAKE_CASE ) else: node.set_right(insert_node(node.get_right() , _SCREAMING_SNAKE_CASE ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: snake_case_ = node.get_right() assert right_child is not None if data < right_child.get_data(): snake_case_ = rl_rotation(_SCREAMING_SNAKE_CASE ) else: snake_case_ = left_rotation(_SCREAMING_SNAKE_CASE ) snake_case_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_SCREAMING_SNAKE_CASE ) return node def _a ( _SCREAMING_SNAKE_CASE ) -> Any: while True: snake_case_ = root.get_right() if right_child is None: break snake_case_ = right_child return root.get_data() def _a ( _SCREAMING_SNAKE_CASE ) -> Any: while True: snake_case_ = root.get_left() if left_child is None: break snake_case_ = left_child return root.get_data() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> MyNode | None: snake_case_ = root.get_left() snake_case_ = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: snake_case_ = get_left_most(_SCREAMING_SNAKE_CASE ) root.set_data(_SCREAMING_SNAKE_CASE ) root.set_right(del_node(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) elif left_child is not None: snake_case_ = left_child elif right_child is not None: snake_case_ = right_child else: return None elif root.get_data() > data: if left_child is None: print("""No such data""" ) return root else: root.set_left(del_node(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) if get_height(_SCREAMING_SNAKE_CASE ) - get_height(_SCREAMING_SNAKE_CASE ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): snake_case_ = left_rotation(_SCREAMING_SNAKE_CASE ) else: snake_case_ = rl_rotation(_SCREAMING_SNAKE_CASE ) elif get_height(_SCREAMING_SNAKE_CASE ) - get_height(_SCREAMING_SNAKE_CASE ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): snake_case_ = right_rotation(_SCREAMING_SNAKE_CASE ) else: snake_case_ = lr_rotation(_SCREAMING_SNAKE_CASE ) snake_case_ = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(_SCREAMING_SNAKE_CASE ) return root class __A : '''simple docstring''' def __init__( self : Dict ) ->None: """simple docstring""" snake_case_ = None def lowerCAmelCase ( self : List[str] ) ->int: """simple docstring""" return get_height(self.root ) def lowerCAmelCase ( self : str , UpperCAmelCase_ : Any ) ->None: """simple docstring""" print("""insert:""" + str(UpperCAmelCase_ ) ) snake_case_ = insert_node(self.root , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Any ) ->None: """simple docstring""" print("""delete:""" + str(UpperCAmelCase_ ) ) if self.root is None: print("""Tree is empty!""" ) return snake_case_ = 
del_node(self.root , UpperCAmelCase_ ) def __str__( self : Dict , ) ->str: # a level-order traversal gives a more intuitive view of the tree """simple docstring""" snake_case_ = """""" snake_case_ = MyQueue() q.push(self.root ) snake_case_ = self.get_height() if layer == 0: return output snake_case_ = 0 while not q.is_empty(): snake_case_ = q.pop() snake_case_ = """ """ * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(UpperCAmelCase_ ) q.push(UpperCAmelCase_ ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space snake_case_ = cnt + 1 for i in range(100 ): if cnt == math.pow(2 , UpperCAmelCase_ ) - 1: snake_case_ = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def _a ( ) -> None: import doctest doctest.testmod() if __name__ == "__main__": _test() __SCREAMING_SNAKE_CASE : Any = AVLtree() __SCREAMING_SNAKE_CASE : Any = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
347
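A quick sanity check for the AVL tree above, using the class and method names from the file's own demo block (AVLtree, insert, get_height in the pre-obfuscation source): inserting an already-sorted sequence would degenerate a plain BST into a list, but the rotations keep the height logarithmic.

t = AVLtree()
for value in range(15):  # worst-case insertion order for an unbalanced BST
    t.insert(value)
# Sequential insertion of 2**4 - 1 keys yields a perfect tree of height 4
# (a leaf counts as height 1 here); assert with a little slack.
assert t.get_height() <= 5, t.get_height()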
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = rotary_dim snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = None snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = FlaxGPTJModelTester(self ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @tooslow def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = False snake_case_ = model.config.eos_token_id snake_case_ = jax.jit(model.generate ) snake_case_ = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case_ = 
tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @is_pt_flax_cross_test def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ ) snake_case_ = fx_state with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ ) snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ 
).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
347
1
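The @tooslow generation test above reduces to the following user-level pattern; a hedged sketch assuming jax/flax are installed and the multi-gigabyte EleutherAI/gpt-j-6B weights are reachable.

import jax
from transformers import FlaxGPTJForCausalLM, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
model.config.do_sample = False
model.config.pad_token_id = model.config.eos_token_id

inputs = tokenizer(["Hello this is a long string", "Hey"],
                   return_tensors="np", padding=True, truncation=True)
jit_generate = jax.jit(model.generate)  # compile generation once, reuse afterwards
sequences = jit_generate(inputs["input_ids"],
                         attention_mask=inputs["attention_mask"],
                         pad_token_id=tokenizer.pad_token_id).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))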
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : int = { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class __A (snake_case__): '''simple docstring''' __lowercase: List[str] = """blenderbot-small""" __lowercase: Union[str, Any] = ["""past_key_values"""] __lowercase: str = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=50_265 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : List[str]=8 , UpperCAmelCase_ : Optional[Any]=2_048 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Union[str, Any]=8 , UpperCAmelCase_ : Tuple=2_048 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=2 , **UpperCAmelCase_ : Any , ) ->Optional[Any]: """simple docstring""" snake_case_ = vocab_size snake_case_ = max_position_embeddings snake_case_ = d_model snake_case_ = encoder_ffn_dim snake_case_ = encoder_layers snake_case_ = encoder_attention_heads snake_case_ = decoder_ffn_dim snake_case_ = decoder_layers snake_case_ = decoder_attention_heads snake_case_ = dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = activation_function snake_case_ = init_std snake_case_ = encoder_layerdrop snake_case_ = decoder_layerdrop snake_case_ = use_cache snake_case_ = encoder_layers snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , forced_eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , ) class __A (snake_case__): '''simple docstring''' @property def lowerCAmelCase ( self : str ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: snake_case_ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: snake_case_ = {0: """batch"""} snake_case_ = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: snake_case_ = {0: """batch""", 1: """decoder_sequence"""} snake_case_ = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase_ , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
snake_case_ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: snake_case_ , snake_case_ = self.num_layers for i in range(UpperCAmelCase_ ): snake_case_ = {0: """batch""", 2: """past_sequence + sequence"""} snake_case_ = {0: """batch""", 2: """past_sequence + sequence"""} else: snake_case_ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def lowerCAmelCase ( self : List[str] ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: snake_case_ = super().outputs else: snake_case_ = super(UpperCAmelCase_ , self ).outputs if self.use_past: snake_case_ , snake_case_ = self.num_layers for i in range(UpperCAmelCase_ ): snake_case_ = {0: """batch""", 2: """past_sequence + sequence"""} snake_case_ = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def lowerCAmelCase ( self : Any , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ) ->Mapping[str, Any]: """simple docstring""" snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Generate decoder inputs snake_case_ = seq_length if not self.use_past else 1 snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} snake_case_ = dict(**UpperCAmelCase_ , **UpperCAmelCase_ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch snake_case_ , snake_case_ = common_inputs["""input_ids"""].shape snake_case_ = common_inputs["""decoder_input_ids"""].shape[1] snake_case_ , snake_case_ = self.num_attention_heads snake_case_ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) snake_case_ = decoder_seq_length + 3 snake_case_ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) snake_case_ = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ )] , dim=1 ) snake_case_ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered snake_case_ , snake_case_ = self.num_layers snake_case_ = min(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = max(UpperCAmelCase_ , UpperCAmelCase_ ) - min_num_layers snake_case_ = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(UpperCAmelCase_ ): common_inputs["past_key_values"].append( ( torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ ), ) ) # TODO: test this. 
snake_case_ = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(UpperCAmelCase_ , UpperCAmelCase_ ): common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) ) return common_inputs def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ) ->Mapping[str, Any]: """simple docstring""" snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch snake_case_ , snake_case_ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values snake_case_ = seqlen + 2 snake_case_ , snake_case_ = self.num_layers snake_case_ , snake_case_ = self.num_attention_heads snake_case_ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) snake_case_ = common_inputs["""attention_mask"""].dtype snake_case_ = torch.cat( [common_inputs["""attention_mask"""], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ , dtype=UpperCAmelCase_ )] , dim=1 ) snake_case_ = [ (torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) for _ in range(UpperCAmelCase_ ) ] return common_inputs def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ) ->Mapping[str, Any]: """simple docstring""" snake_case_ = compute_effective_axis_dimension( UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX snake_case_ = tokenizer.num_special_tokens_to_add(UpperCAmelCase_ ) snake_case_ = compute_effective_axis_dimension( UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase_ ) # Generate dummy inputs according to compute batch and sequence snake_case_ = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size snake_case_ = dict(tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ ) ) return common_inputs def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ) ->Mapping[str, Any]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: snake_case_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ ) elif self.task == "causal-lm": snake_case_ = self._generate_dummy_inputs_for_causal_lm( UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ ) else: snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ ) return common_inputs def 
lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ) ->Optional[int]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: snake_case_ = super()._flatten_past_key_values_(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: snake_case_ = super(UpperCAmelCase_ , self )._flatten_past_key_values_( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
347
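A small instantiation example for the configuration above; the attribute_map means the generic hidden_size and num_attention_heads names resolve to d_model and encoder_attention_heads respectively.

from transformers import BlenderbotSmallConfig

config = BlenderbotSmallConfig(d_model=64, encoder_layers=2, decoder_layers=2,
                               encoder_attention_heads=4, decoder_attention_heads=4)
print(config.hidden_size)          # 64, mapped to d_model
print(config.num_attention_heads)  # 4, mapped to encoder_attention_heads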
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
347
1
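Usage sketch, assuming a transformers build that ships UperNet: constructed with no backbone_config, the class logs its fallback message and wires in a default ResNet backbone, exactly as the code above describes.

from transformers import UperNetConfig

config = UperNetConfig()  # triggers the ResNet-fallback log line above
print(config.backbone_config.model_type)    # 'resnet'
print(config.backbone_config.out_features)  # ['stage1', 'stage2', 'stage3', 'stage4']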
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> float: snake_case_ = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: snake_case_ = 1 - (matter_density + radiation_density + dark_energy) snake_case_ = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) snake_case_ = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation __SCREAMING_SNAKE_CASE : Any = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
347
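In standard notation, the function above evaluates the Friedmann equation for an FLRW universe, with the curvature density fixed by the closure relation:

    E(z) = \sqrt{\Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda},
    \qquad H(z) = H_0 \, E(z),
    \qquad \Omega_k = 1 - (\Omega_r + \Omega_m + \Omega_\Lambda).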
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = """ylacombe/bark-small""" snake_case_ = tempfile.mkdtemp() snake_case_ = """en_speaker_1""" snake_case_ = """This is a test string""" snake_case_ = """speaker_embeddings_path.json""" snake_case_ = """speaker_embeddings""" def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case_ = 35 snake_case_ = 2 snake_case_ = 8 snake_case_ = { """semantic_prompt""": np.ones(UpperCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case_ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string ) snake_case_ = tokenizer( self.input_string , padding="""max_length""" 
, max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
347
1
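The round trip those tests exercise looks like this in user code; a hedged sketch that assumes network access to the ylacombe/bark-small checkpoint named in the test setup.

from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
print(sorted(inputs.keys()))  # expect input_ids / attention_mask plus 'history_prompt'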
"""simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = range(2, 20 + 1) __SCREAMING_SNAKE_CASE : Optional[Any] = [10**k for k in range(ks[-1] + 1)] __SCREAMING_SNAKE_CASE : dict[int, dict[int, list[list[int]]]] = {} def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: snake_case_ = sum(a_i[j] for j in range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ) ) snake_case_ = sum(a_i[j] * base[j] for j in range(min(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) ) snake_case_ , snake_case_ = 0, 0 snake_case_ = n - i snake_case_ = memo.get(_SCREAMING_SNAKE_CASE ) if sub_memo is not None: snake_case_ = sub_memo.get(_SCREAMING_SNAKE_CASE ) if jumps is not None and len(_SCREAMING_SNAKE_CASE ) > 0: # find and make the largest jump without going over snake_case_ = -1 for _k in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: snake_case_ = _k break if max_jump >= 0: snake_case_ , snake_case_ , snake_case_ = jumps[max_jump] # since the difference between jumps is cached, add c snake_case_ = diff + c for j in range(min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ) ): snake_case_ , snake_case_ = divmod(_SCREAMING_SNAKE_CASE , 10 ) if new_c > 0: add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: snake_case_ = [] else: snake_case_ = {c: []} snake_case_ = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps snake_case_ , snake_case_ = next_term(_SCREAMING_SNAKE_CASE , k - 1 , i + dn , _SCREAMING_SNAKE_CASE ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead snake_case_ , snake_case_ = compute(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , i + dn , _SCREAMING_SNAKE_CASE ) diff += _diff dn += terms_jumped snake_case_ = sub_memo[c] # keep jumps sorted by # of terms skipped snake_case_ = 0 while j < len(_SCREAMING_SNAKE_CASE ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(_SCREAMING_SNAKE_CASE , (diff, dn, k) ) return (diff, dn) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: if i >= n: return 0, i if k > len(_SCREAMING_SNAKE_CASE ): a_i.extend([0 for _ in range(k - len(_SCREAMING_SNAKE_CASE ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) snake_case_ = i snake_case_ , snake_case_ , snake_case_ = 0, 0, 0 for j in range(len(_SCREAMING_SNAKE_CASE ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 snake_case_ = ds_c + ds_b diff += addend snake_case_ = 0 for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ = a_i[j] + addend snake_case_ , snake_case_ = divmod(_SCREAMING_SNAKE_CASE , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return diff, i - start_i def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: for j in range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ): snake_case_ = digits[j] + addend if s >= 10: snake_case_ , snake_case_ = divmod(_SCREAMING_SNAKE_CASE , 10 ) snake_case_ = addend // 10 + quotient else: snake_case_ = s snake_case_ = addend // 10 if addend == 0: break while addend > 0: snake_case_ , snake_case_ = divmod(_SCREAMING_SNAKE_CASE , 10 ) 
digits.append(_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE = 10**15 ) -> int: snake_case_ = [1] snake_case_ = 1 snake_case_ = 0 while True: snake_case_ , snake_case_ = next_term(_SCREAMING_SNAKE_CASE , 20 , i + dn , _SCREAMING_SNAKE_CASE ) dn += terms_jumped if dn == n - i: break snake_case_ = 0 for j in range(len(_SCREAMING_SNAKE_CASE ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(f"""{solution() = }""")
347
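The memoised jumping machinery above is hard to audit by eye. The sequence it accelerates is simply a(1) = 1 and a(n+1) = a(n) + digitsum(a(n)), whose first ten terms are 1, 2, 4, 8, 16, 23, 28, 38, 49, 62; a linear-time reference like this is useful for cross-checking small n.

def brute_force(n: int) -> int:
    # a(1) = 1; each step adds the decimal digit sum of the current term.
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term

assert brute_force(10) == 62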
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
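# Usage sketch (added for illustration; `SketchArguments` and its fields are
# hypothetical, not part of the test suite above): HfArgumentParser turns
# dataclass fields into argparse arguments and parses CLI strings back into
# typed dataclass instances.
from dataclasses import dataclass

from transformers import HfArgumentParser


@dataclass
class SketchArguments:
    learning_rate: float = 5e-5
    num_epochs: int = 3


sketch_parser = HfArgumentParser(SketchArguments)
(sketch_args,) = sketch_parser.parse_args_into_dataclasses(["--learning_rate", "1e-4"])
assert sketch_args.learning_rate == 1e-4
assert sketch_args.num_epochs == 3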
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None: """simple docstring""" warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
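# Usage sketch (illustrative, not part of this module): quantizing a model with
# the helpers above through accelerate's public wrappers. The checkpoint name
# and the weights folder are assumptions.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("some/checkpoint")  # hypothetical checkpoint
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)
bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="weights/",  # hypothetical folder containing the checkpoint shards
    device_map="auto",
)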
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class __A (snake_case__): '''simple docstring''' __lowercase: int = """mvp""" __lowercase: Optional[Any] = ["""past_key_values"""] __lowercase: List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[int]=50_267 , UpperCAmelCase_ : Union[str, Any]=1_024 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : str=4_096 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Dict=4_096 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[int]=1_024 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=100 , UpperCAmelCase_ : Any=800 , **UpperCAmelCase_ : Optional[Any] , ) ->str: """simple docstring""" snake_case_ = vocab_size snake_case_ = max_position_embeddings snake_case_ = d_model snake_case_ = encoder_ffn_dim snake_case_ = encoder_layers snake_case_ = encoder_attention_heads snake_case_ = decoder_ffn_dim snake_case_ = decoder_layers snake_case_ = decoder_attention_heads snake_case_ = dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = activation_function snake_case_ = init_std snake_case_ = encoder_layerdrop snake_case_ = decoder_layerdrop snake_case_ = classifier_dropout snake_case_ = use_cache snake_case_ = encoder_layers snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True snake_case_ = use_prompt snake_case_ = prompt_length snake_case_ = prompt_mid_dim super().__init__( pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , forced_eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase_ ): snake_case_ = self.bos_token_id warnings.warn( F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ """The config can simply be saved and uploaded again to be fixed.""" )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = 
num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
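# Worked example (illustrative) of the shard-name convention count_samples()
# relies on: shards produced by prepare_tfrecord_shards.py end in
# "-<shard_index>-<num_samples>.tfrecord". The file name below is hypothetical.
import re

example_shard = "wikitext-00017-52416.tfrecord"
match = re.search(r"-\d+-(\d+)\.tfrecord", example_shard)
assert match is not None and int(match.group(1)) == 52416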
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = 
os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
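# Illustrative sketch of the sentinel-token id scheme implemented above:
# "<extra_id_N>" maps to vocab_size - N - 1, so the sentinels occupy the top of
# the id space. The vocabulary size below assumes the common T5 checkpoints
# (32,000 SentencePiece tokens plus 100 extra ids).
vocab_size = 32_100
assert vocab_size - 0 - 1 == 32_099  # id of "<extra_id_0>"
assert vocab_size - 99 - 1 == 32_000  # id of "<extra_id_99>"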
"""simple docstring""" import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') __SCREAMING_SNAKE_CASE : str = parser.parse_args() if args.model_type == "bert": __SCREAMING_SNAKE_CASE : Optional[int] = BertForMaskedLM.from_pretrained(args.model_name) __SCREAMING_SNAKE_CASE : Tuple = 'bert' else: raise ValueError('args.model_type should be "bert".') __SCREAMING_SNAKE_CASE : Tuple = model.state_dict() __SCREAMING_SNAKE_CASE : int = {} for w in ["word_embeddings", "position_embeddings"]: __SCREAMING_SNAKE_CASE : Optional[int] = state_dict[f"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: __SCREAMING_SNAKE_CASE : str = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""] __SCREAMING_SNAKE_CASE : Optional[int] = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: __SCREAMING_SNAKE_CASE : int = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] __SCREAMING_SNAKE_CASE : Any = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] __SCREAMING_SNAKE_CASE : Optional[int] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] __SCREAMING_SNAKE_CASE : int = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] __SCREAMING_SNAKE_CASE : Tuple = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] __SCREAMING_SNAKE_CASE : str = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] __SCREAMING_SNAKE_CASE : List[str] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] __SCREAMING_SNAKE_CASE : List[str] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 __SCREAMING_SNAKE_CASE : Optional[Any] = state_dict['cls.predictions.decoder.weight'] __SCREAMING_SNAKE_CASE : Tuple = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: __SCREAMING_SNAKE_CASE : Tuple = state_dict[f"""cls.predictions.transform.dense.{w}"""] __SCREAMING_SNAKE_CASE : Optional[int] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int: snake_case_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player in a perfect binary
    game tree whose leaf values are given in `scores`."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
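# Worked example (illustrative): a height-2 tree over the leaves [3, 5, 2, 9].
# The maximizer chooses max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3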
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)