Dataset schema (reconstructed from the flattened viewer header):

    code                     string   81 - 54k chars
    code_codestyle           int64    0 - 721
    style_context            string   91 - 41.9k chars
    style_context_codestyle  int64    0 - 699
    label                    int64    0 - 1
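Each row below pairs a code sample with a style_context sample, followed by the two codestyle ids and the binary label. A minimal sketch of loading such a dataset with the datasets library; the repository id is a placeholder, since the dump does not name the dataset:

    from datasets import load_dataset

    # "user/code-style-pairs" is hypothetical -- substitute the real dataset id.
    ds = load_dataset("user/code-style-pairs", split="train")
    row = ds[0]
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])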
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    pipe = convert(
        args.base_model_path, args.checkpoint_path, args.lora_prefix_unet, args.lora_prefix_text_encoder, args.alpha
    )
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
code_codestyle: 689
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
style_context_codestyle: 689
label: 1
def dodecahedron_surface_area(edge: float) -> float:
    # Validate before comparing so non-numeric input raises a clean error.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
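A quick sanity check against the closed-form values for edge length 5 (my numbers, computed from the formulas above; not part of the original file):

    print(round(dodecahedron_surface_area(5), 2))  # ~516.14
    print(round(dodecahedron_volume(5), 2))        # ~957.89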
code_codestyle: 689
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
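A small usage sketch (the classic case of three coins at the root and two empty leaves, which needs exactly two moves; my example, not in the file):

    tree = TreeNode(3, TreeNode(0), TreeNode(0))
    print(distribute_coins(tree))  # 2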
style_context_codestyle: 689
label: 1
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 689
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {"""vocab_file""": """vocab.txt"""} __a = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } __a = { """openbmb/cpm-ant-10b""": 1_0_2_4, } def UpperCamelCase_ ( a_ ) ->List[Any]: A =collections.OrderedDict() with open(a_ , "r" , encoding="utf-8" ) as reader: A =reader.readlines() for index, token in enumerate(a_ ): A =token.rstrip("\n" ) A =index return vocab class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ): """simple docstring""" A =vocab A =unk_token A =max_input_chars_per_word def _a ( self : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" A =list(snake_case__ ) if len(snake_case__ ) > self.max_input_chars_per_word: return [self.unk_token] A =0 A =[] while start < len(snake_case__ ): A =len(snake_case__ ) A =None while start < end: A ="".join(chars[start:end] ) if substr in self.vocab: A =substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(snake_case__ ) A =end return sub_tokens class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = ["input_ids", "attention_mask"] _A = False def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ): """simple docstring""" requires_backends(self , ["jieba"] ) super().__init__( bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , ) A =bod_token A =eod_token A =load_vocab(snake_case__ ) A =self.encoder[space_token] A =self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) A ={v: k for k, v in self.encoder.items()} A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _a ( self : Dict ): """simple docstring""" return self.encoder[self.bod_token] @property def _a ( self : List[str] ): """simple docstring""" return self.encoder[self.eod_token] @property def _a ( self : Any ): """simple docstring""" return self.encoder["\n"] @property def _a ( self : List[str] ): """simple docstring""" return len(self.encoder ) def _a ( self : Tuple ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : Tuple , snake_case__ : int ): """simple docstring""" A =[] for x in jieba.cut(snake_case__ , cut_all=snake_case__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) ) return output_tokens def _a ( self : List[Any] , snake_case__ : List[Any] , 
**snake_case__ : str ): """simple docstring""" A =[i for i in token_ids if i >= 0] A =[ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(snake_case__ , **snake_case__ ) def _a ( self : List[Any] , snake_case__ : int ): """simple docstring""" return token in self.encoder def _a ( self : Optional[Any] , snake_case__ : List[str] ): """simple docstring""" return "".join(snake_case__ ) def _a ( self : List[Any] , snake_case__ : Optional[Any] ): """simple docstring""" return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def _a ( self : Dict , snake_case__ : Optional[int] ): """simple docstring""" return self.decoder.get(snake_case__ , self.unk_token ) def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" if os.path.isdir(snake_case__ ): A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: A =(filename_prefix + "-" if filename_prefix else "") + save_directory A =0 if " " in self.encoder: A =self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: A =self.encoder["\n"] del self.encoder["\n"] A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) with open(snake_case__ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) A =token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ): """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is not None: return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) return [1] + ([0] * len(snake_case__ ))
style_context_codestyle: 689
label: 1
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
code_codestyle: 689
def solution(n: int = 600851475143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
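The problem statement's worked example makes a handy check (my snippet, not in the file):

    print(solution(13195))  # 29, the largest prime factor of 13195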
style_context_codestyle: 689
label: 1
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take the current denomination as long as it fits
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y":
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
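A non-interactive usage sketch with the default Indian denominations (my example, not in the file):

    print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987"))
    # [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]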
code_codestyle: 689
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
style_context_codestyle: 689
label: 1
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
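For example (my check, not in the file):

    print(binary_xor(25, 32))  # '0b111001': 011001 XOR 100000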
code_codestyle: 689
from . import __version__

# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
    CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK,
    ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME,
    HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME,
    MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME,
    TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF,
    USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError,
    ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError,
    RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings,
    add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func,
    default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo,
    get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available,
    is_bs4_available, is_coloredlogs_available, is_datasets_available, is_detectron2_available,
    is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available,
    is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available,
    is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available,
    is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available,
    is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available,
    is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble,
    is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available,
    is_tf2onnx_available, is_tf_available, is_timm_available, is_tokenizers_available,
    is_torch_available, is_torch_bf16_available, is_torch_cuda_available, is_torch_fx_available,
    is_torch_fx_proxy, is_torch_mps_available, is_torch_tf32_available, is_torch_tpu_available,
    is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available,
    replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method,
)
style_context_codestyle: 689
label: 1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 689
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
style_context_codestyle: 689
label: 1
import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __a = logging.getLogger(__name__) __a = tf.data.AUTOTUNE def UpperCamelCase_ ( ) ->str: A =argparse.ArgumentParser(description="Train a masked language model on TPU." ) parser.add_argument( "--pretrained_model_config" , type=a_ , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , ) parser.add_argument( "--tokenizer" , type=a_ , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , ) parser.add_argument( "--per_replica_batch_size" , type=a_ , default=8 , help="Batch size per TPU core." , ) parser.add_argument( "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , ) parser.add_argument( "--tpu_name" , type=a_ , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , ) parser.add_argument( "--tpu_zone" , type=a_ , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , ) parser.add_argument( "--gcp_project" , type=a_ , help="Google cloud project name. Only used for non-Colab TPU nodes." ) parser.add_argument( "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , ) parser.add_argument( "--train_dataset" , type=a_ , help="Path to training dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--shuffle_buffer_size" , type=a_ , default=2**18 , help="Size of the shuffle buffer (in samples)" , ) parser.add_argument( "--eval_dataset" , type=a_ , help="Path to evaluation dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--num_epochs" , type=a_ , default=1 , help="Number of epochs to train for." , ) parser.add_argument( "--learning_rate" , type=a_ , default=1E-4 , help="Learning rate to use for training." , ) parser.add_argument( "--weight_decay_rate" , type=a_ , default=1E-3 , help="Weight decay rate to use for training." , ) parser.add_argument( "--max_length" , type=a_ , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , ) parser.add_argument( "--mlm_probability" , type=a_ , default=0.15 , help="Fraction of tokens to mask during training." , ) parser.add_argument("--output_dir" , type=a_ , required=a_ , help="Path to save model checkpoints to." ) parser.add_argument("--hub_model_id" , type=a_ , help="Model ID to upload to on the Hugging Face Hub." ) A =parser.parse_args() return args def UpperCamelCase_ ( a_ ) ->List[str]: try: if args.tpu_name: A =tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: A =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or " "--gcp_project. When running on a TPU VM, use --tpu_name local." 
) tf.config.experimental_connect_to_cluster(a_ ) tf.tpu.experimental.initialize_tpu_system(a_ ) return tpu def UpperCamelCase_ ( a_ ) ->Tuple: A =0 for file in file_list: A =file.split("/" )[-1] A =re.search(R"-\d+-(\d+)\.tfrecord" , a_ ).group(1 ) A =int(a_ ) num_samples += sample_count return num_samples def UpperCamelCase_ ( a_ , a_ , a_ , a_ , a_ , a_=None ) ->Tuple: A =count_samples(a_ ) A =tf.data.Dataset.from_tensor_slices(a_ ) if shuffle: A =dataset.shuffle(len(a_ ) ) A =tf.data.TFRecordDataset(a_ , num_parallel_reads=a_ ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here A =dataset.apply(tf.data.experimental.assert_cardinality(a_ ) ) A =dataset.map(a_ , num_parallel_calls=a_ ) if shuffle: assert shuffle_buffer_size is not None A =dataset.shuffle(args.shuffle_buffer_size ) A =dataset.batch(a_ , drop_remainder=a_ ) A =dataset.map(a_ , num_parallel_calls=a_ ) A =dataset.prefetch(a_ ) return dataset def UpperCamelCase_ ( a_ ) ->int: if not args.no_tpu: A =initialize_tpu(a_ ) A =tf.distribute.TPUStrategy(a_ ) else: A =tf.distribute.OneDeviceStrategy(device="/gpu:0" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" ) A =AutoTokenizer.from_pretrained(args.tokenizer ) A =AutoConfig.from_pretrained(args.pretrained_model_config ) A =tokenizer.vocab_size A =tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) ) if not training_records: raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' ) A =tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) ) if not eval_records: raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' ) A =count_samples(a_ ) A =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) A =steps_per_epoch * args.num_epochs with strategy.scope(): A =TFAutoModelForMaskedLM.from_config(a_ ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built A , A =create_optimizer( num_train_steps=a_ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=a_ , metrics=["accuracy"] ) def decode_fn(a_ ): A ={ "input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), "attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(a_ , a_ ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. 
A =DataCollatorForLanguageModeling( tokenizer=a_ , mlm_probability=args.mlm_probability , mlm=a_ , return_tensors="tf" ) def mask_with_collator(a_ ): # TF really needs an isin() function A =( ~tf.cast(batch["attention_mask"] , tf.bool ) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) A , A =data_collator.tf_mask_tokens( batch["input_ids"] , vocab_size=len(a_ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=a_ , ) return batch A =args.per_replica_batch_size * strategy.num_replicas_in_sync A =prepare_dataset( a_ , decode_fn=a_ , mask_fn=a_ , batch_size=a_ , shuffle=a_ , shuffle_buffer_size=args.shuffle_buffer_size , ) A =prepare_dataset( a_ , decode_fn=a_ , mask_fn=a_ , batch_size=a_ , shuffle=a_ , ) A =[] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=a_ ) ) model.fit( a_ , validation_data=a_ , epochs=args.num_epochs , callbacks=a_ , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __a = parse_args() main(args)
code_codestyle: 689
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 689
label: 1
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
code_codestyle: 689
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
style_context_codestyle: 689
label: 1
def rank_of_matrix(matrix: list[list[float]]) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            # (note: reassigning the loop variable has no effect in a Python for loop)
            row -= 1

    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
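A quick check of the reduction above (my example, not from the file): a 2x2 matrix whose second row is a multiple of the first reduces to rank 1.

    print(rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]))  # 1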
code_codestyle: 689
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
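For n = 3, array = [1, 2, 5], target = 5 the printed count is 9: the ordered compositions 5, 1+2+2 (3 orderings), 1+1+1+2 (4 orderings), and 1+1+1+1+1. A cross-check of the three implementations (my snippet, not in the file):

    assert (
        combination_sum_iv(3, [1, 2, 5], 5)
        == combination_sum_iv_dp_array(3, [1, 2, 5], 5)
        == combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
        == 9
    )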
style_context_codestyle: 689
label: 1
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
code_codestyle: 689
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
style_context_codestyle: 689
label: 1
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
code_codestyle: 689
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
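The per-pixel Python loop works but is slow on large images; NumPy broadcasting performs the same 8-bit inversion in one step (an alternative sketch of mine, not part of the original file):

    import numpy as np

    def convert_to_negative_vectorized(img: np.ndarray) -> np.ndarray:
        # 255 - value flips every channel of every pixel in a single broadcast.
        return 255 - img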
style_context_codestyle: 689
label: 1
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = TextToVideoSDPipeline _A = TEXT_TO_IMAGE_PARAMS _A = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. _A = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def _a ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) A =UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) A =DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , ) torch.manual_seed(0 ) A =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) A =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) A =CLIPTextModel(snake_case__ ) A =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A ={ "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def _a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : Any=0 ): """simple docstring""" if str(snake_case__ ).startswith("mps" ): A =torch.manual_seed(snake_case__ ) else: A =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) A ={ "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def _a ( self : Optional[int] ): """simple docstring""" A ="cpu" # ensure determinism for the device-dependent torch.Generator A =self.get_dummy_components() A =TextToVideoSDPipeline(**snake_case__ ) A =sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) A =self.get_dummy_inputs(snake_case__ ) A ="np" A =sd_pipe(**snake_case__ ).frames A =frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) A =np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self : List[Any] ): """simple docstring""" self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and 
`xformers` installed" , ) def _a ( self : str ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ , expected_max_diff=1E-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def _a ( self : Dict ): """simple docstring""" pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def _a ( self : Any ): """simple docstring""" pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def _a ( self : Optional[int] ): """simple docstring""" pass def _a ( self : Union[str, Any] ): """simple docstring""" return super().test_progress_bar() @slow @skip_mps class UpperCamelCase__( unittest.TestCase ): """simple docstring""" def _a ( self : Any ): """simple docstring""" A =load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) A =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) A =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) A =pipe.to("cuda" ) A ="Spiderman is surfing" A =torch.Generator(device="cpu" ).manual_seed(0 ) A =pipe(snake_case__ , generator=snake_case__ , num_inference_steps=25 , output_type="pt" ).frames A =video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def _a ( self : Union[str, Any] ): """simple docstring""" A =load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) A =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) A =pipe.to("cuda" ) A ="Spiderman is surfing" A =torch.Generator(device="cpu" ).manual_seed(0 ) A =pipe(snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="pt" ).frames A =video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
code_codestyle: 689
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", } __a = { """vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""}, """merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""}, } __a = { """ctrl""": 2_5_6, } __a = { """Pregnancy""": 1_6_8_6_2_9, """Christianity""": 7_6_7_5, """Explain""": 1_0_6_4_2_3, """Fitness""": 6_3_4_4_0, """Saving""": 6_3_1_6_3, """Ask""": 2_7_1_7_1, """Ass""": 9_5_9_8_5, """Joke""": 1_6_3_5_0_9, """Questions""": 4_5_6_2_2, """Thoughts""": 4_9_6_0_5, """Retail""": 5_2_3_4_2, """Feminism""": 1_6_4_3_3_8, """Writing""": 1_1_9_9_2, """Atheism""": 1_9_2_2_6_3, """Netflix""": 4_8_6_1_6, """Computing""": 3_9_6_3_9, """Opinion""": 4_3_2_1_3, """Alone""": 4_4_9_6_7, """Funny""": 5_8_9_1_7, """Gaming""": 4_0_3_5_8, """Human""": 4_0_8_8, """India""": 1_3_3_1, """Joker""": 7_7_1_3_8, """Diet""": 3_6_2_0_6, """Legal""": 1_1_8_5_9, """Norman""": 4_9_3_9, """Tip""": 7_2_6_8_9, """Weight""": 5_2_3_4_3, """Movies""": 4_6_2_7_3, """Running""": 2_3_4_2_5, """Science""": 2_0_9_0, """Horror""": 3_7_7_9_3, """Confession""": 6_0_5_7_2, """Finance""": 1_2_2_5_0, """Politics""": 1_6_3_6_0, """Scary""": 1_9_1_9_8_5, """Support""": 1_2_6_5_4, """Technologies""": 3_2_5_1_6, """Teenage""": 6_6_1_6_0, """Event""": 3_2_7_6_9, """Learned""": 6_7_4_6_0, """Notion""": 1_8_2_7_7_0, """Wikipedia""": 3_7_5_8_3, """Books""": 6_6_6_5, """Extract""": 7_6_0_5_0, """Confessions""": 1_0_2_7_0_1, """Conspiracy""": 7_5_9_3_2, """Links""": 6_3_6_7_4, """Narcissus""": 1_5_0_4_2_5, """Relationship""": 5_4_7_6_6, """Relationships""": 1_3_4_7_9_6, """Reviews""": 4_1_6_7_1, """News""": 4_2_5_6, """Translation""": 2_6_8_2_0, """multilingual""": 1_2_8_4_0_6, } def UpperCamelCase_ ( a_ ) ->List[str]: A =set() A =word[0] for char in word[1:]: pairs.add((prev_char, char) ) A =char A =set(a_ ) return pairs class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = CONTROL_CODES def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ): """simple docstring""" super().__init__(unk_token=snake_case__ , **snake_case__ ) with open(snake_case__ , encoding="utf-8" ) as vocab_handle: A =json.load(snake_case__ ) A ={v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: A =merges_handle.read().split("\n" )[1:-1] A =[tuple(merge.split() ) for merge in merges] A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) A ={} @property def _a ( self : str ): """simple docstring""" return len(self.encoder ) def _a ( self : List[Any] ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : int , snake_case__ : Any ): """simple docstring""" if token in self.cache: return self.cache[token] A =tuple(snake_case__ ) A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) A =get_pairs(snake_case__ ) if not pairs: return token while True: A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break A , A =bigram A =[] A =0 while i < len(snake_case__ ): try: 
A =word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A =j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A =tuple(snake_case__ ) A =new_word if len(snake_case__ ) == 1: break else: A =get_pairs(snake_case__ ) A ="@@ ".join(snake_case__ ) A =word[:-4] A =word return word def _a ( self : List[str] , snake_case__ : int ): """simple docstring""" A =[] A =re.findall(R"\S+\n?" , snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def _a ( self : List[str] , snake_case__ : Optional[int] ): """simple docstring""" return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def _a ( self : Union[str, Any] , snake_case__ : str ): """simple docstring""" return self.decoder.get(snake_case__ , self.unk_token ) def _a ( self : Optional[int] , snake_case__ : Any ): """simple docstring""" A =" ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(snake_case__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(snake_case__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" ) A =0 with open(snake_case__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : snake_case__[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) A =token_index writer.write(" ".join(snake_case__ ) + "\n" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
689
1
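The CTRL tokenizer row above is the classic BPE merge loop with identifiers masked by this dataset's convention. As a readability aid, here is a hedged, de-obfuscated sketch of that loop; the merge ranks passed in the demo call are made up, a real tokenizer loads them from merges.txt, and the pair scan is a simplified equivalent of the word.index-based scan above.

def get_pairs(word):
    # all adjacent symbol pairs in `word`, a tuple of symbols
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def bpe(token, bpe_ranks):
    # repeatedly merge the lowest-ranked adjacent pair until none is rankable
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    while True:
        pairs = get_pairs(word)
        if not pairs:
            break
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)  # apply the merge
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return "@@ ".join(word)[:-4]  # drop the trailing "</w>" marker

# hypothetical merge ranks: ("l", "o") merges first, then ("lo", "w</w>")
print(bpe("low", {("l", "o"): 0, ("lo", "w</w>"): 1}))  # -> low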
def UpperCamelCase_ ( a_ = 100_0000 ) ->int: A =set(range(3 , a_ , 2 ) ) primes.add(2 ) for p in range(3 , a_ , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , a_ , a_ ) ) ) A =[float(a_ ) for n in range(limit + 1 )] for p in primes: for n in range(a_ , limit + 1 , a_ ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F'''{solution() = }''')
689
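The row above sieves primes and accumulates phi[n] *= 1 - 1/p over each prime's multiples, then sums the totients, a Project Euler-style count of reduced proper fractions. A readable integer-only sketch of the same idea, with a small limit so the assertion runs instantly:

def totient_sum(limit: int) -> int:
    sieve = [True] * (limit + 1)
    phi = list(range(limit + 1))          # start with phi[n] = n
    for p in range(2, limit + 1):
        if sieve[p]:                      # p is prime
            for n in range(p, limit + 1, p):
                if n > p:
                    sieve[n] = False      # mark composites of p
                phi[n] -= phi[n] // p     # phi[n] *= (1 - 1/p), kept integral
    return sum(phi[2:])

assert totient_sum(8) == 21  # phi(2..8) = 1+2+2+4+2+6+4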
def UpperCamelCase_ ( a_ , a_ ) ->list[int]: A =int(a_ ) # Initialize Result A =[] # Traverse through all denomination for denomination in reversed(a_ ): # Find denominations while int(a_ ) >= int(a_ ): total_value -= int(a_ ) answer.append(a_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": __a = [] __a = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): __a = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) __a = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter __a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0] __a = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(F'''Following is minimal change for {value}: ''') __a = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
689
1
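The change-making row above is the greedy algorithm: always take the largest denomination that still fits. A compact sketch; note greedy is optimal for canonical coin systems such as the Indian denominations used here, but not for arbitrary sets (e.g. {1, 3, 4} with value 6).

def find_minimum_change(denominations: list[int], value: int) -> list[int]:
    remaining = value
    answer = []
    for coin in sorted(denominations, reverse=True):  # largest coin first
        count, remaining = divmod(remaining, coin)
        answer.extend([coin] * count)
    return answer

print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987))
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]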
import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( "kwargs, expected" , [ ({"num_shards": 0, "max_num_jobs": 1}, []), ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]), ({"num_shards": 10, "max_num_jobs": 10}, [range(a_ , i + 1 ) for i in range(10 )]), ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]), ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def UpperCamelCase_ ( a_ , a_ ) ->Dict: A =_distribute_shards(**a_ ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, max_num_jobs, expected" , [ ({"foo": 0}, 10, [{"foo": 0}]), ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]), ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]), ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]), ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]), ] , ) def UpperCamelCase_ ( a_ , a_ , a_ ) ->Any: A =_split_gen_kwargs(a_ , a_ ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, expected" , [ ({"foo": 0}, 1), ({"shards": [0]}, 1), ({"shards": [0, 1, 2, 3]}, 4), ({"shards": [0, 1, 2, 3], "foo": 0}, 4), ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4), ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError), ] , ) def UpperCamelCase_ ( a_ , a_ ) ->List[str]: if expected is RuntimeError: with pytest.raises(a_ ): _number_of_shards_in_gen_kwargs(a_ ) else: A =_number_of_shards_in_gen_kwargs(a_ ) assert out == expected
689
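The parametrized tests above pin down the sharding rule: split num_shards into at most max_num_jobs contiguous ranges, giving the first (num_shards % jobs) ranges one extra shard. A sketch that reproduces the asserted behavior; the real datasets implementation may differ in details.

def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    num_jobs = min(num_shards, max_num_jobs)  # never more jobs than shards
    out, start = [], 0
    for job in range(num_jobs):
        size = num_shards // num_jobs + (1 if job < num_shards % num_jobs else 0)
        out.append(range(start, start + size))
        start += size
    return out

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]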
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = ["""model.decoder.embed_positions.weights"""] def UpperCamelCase_ ( a_ ) ->List[str]: if "emb" in name: A =name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: A =name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: A =name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: A =name.replace("linear1" , "fc1" ) if "linear2" in name: A =name.replace("linear2" , "fc2" ) if "norm1" in name: A =name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: A =name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: A =name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: A =name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: A =name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]: A =list(state_dict.keys() ) A ={} for key in keys: A =state_dict.pop(a_ ) A =rename_keys(a_ ) if "in_proj_weight" in key: # split fused qkv proj A =val[:hidden_size, :] A =val[hidden_size : 2 * hidden_size, :] A =val[-hidden_size:, :] elif "enc_to_dec_proj" in key: A =val else: A =val return state_dict, enc_dec_proj_state_dict def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig: if checkpoint == "small": # default config values A =1024 A =24 A =16 elif checkpoint == "medium": A =1536 A =48 A =24 elif checkpoint == "large": A =2048 A =48 A =32 else: raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) A =MusicgenDecoderConfig( hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , ) return config @torch.no_grad() def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]: A =MusicGen.get_pretrained(a_ , device=a_ ) A =decoder_config_from_checkpoint(a_ ) A =fairseq_model.lm.state_dict() A , A =rename_state_dict( a_ , hidden_size=decoder_config.hidden_size ) A =TaEncoderModel.from_pretrained("t5-base" ) A =EncodecModel.from_pretrained("facebook/encodec_32khz" ) A =MusicgenForCausalLM(a_ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection A , A =decoder.load_state_dict(a_ , strict=a_ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(a_ ) if len(a_ ) > 0: raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' ) if len(a_ ) > 0: raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(a_ ) # check we can do a forward pass A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) A 
=input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): A =model(input_ids=a_ , decoder_input_ids=a_ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor A =AutoTokenizer.from_pretrained("t5-base" ) A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ ) # set the appropriate bos/pad token ids A =2048 A =2048 # set other default generation config params A =int(30 * audio_encoder.config.frame_rate ) A =True A =3.0 if pytorch_dump_folder is not None: Path(a_ ).mkdir(exist_ok=a_ ) logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(a_ ) processor.save_pretrained(a_ ) if repo_id: logger.info(f'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(a_ ) processor.push_to_hub(a_ ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) __a = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
689
1
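One technique worth isolating from the MusicGen conversion above is splitting a fused in_proj QKV weight into separate q/k/v matrices by row slices. A standalone illustration with a hypothetical hidden size:

import torch

hidden_size = 8
fused = torch.randn(3 * hidden_size, hidden_size)   # stacked [q; k; v] rows
q_proj = fused[:hidden_size, :]
k_proj = fused[hidden_size : 2 * hidden_size, :]
v_proj = fused[-hidden_size:, :]
assert torch.equal(torch.cat([q_proj, k_proj, v_proj]), fused)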
import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging __a = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , snake_case__ : str , snake_case__ : Dict=7_68 ): """simple docstring""" super().__init__(snake_case__ ) A =proj_size A =CLIPVisionModel(snake_case__ ) A =PaintByExampleMapper(snake_case__ ) A =nn.LayerNorm(config.hidden_size ) A =nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling A =nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _a ( self : Any , snake_case__ : List[str] , snake_case__ : int=False ): """simple docstring""" A =self.model(pixel_values=snake_case__ ) A =clip_output.pooler_output A =self.mapper(latent_states[:, None] ) A =self.final_layer_norm(snake_case__ ) A =self.proj_out(snake_case__ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : Any , snake_case__ : Tuple ): """simple docstring""" super().__init__() A =(config.num_hidden_layers + 1) // 5 A =config.hidden_size A =1 A =nn.ModuleList( [ BasicTransformerBlock(snake_case__ , snake_case__ , snake_case__ , activation_fn="gelu" , attention_bias=snake_case__ ) for _ in range(snake_case__ ) ] ) def _a ( self : int , snake_case__ : Tuple ): """simple docstring""" for block in self.blocks: A =block(snake_case__ ) return hidden_states
689
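The image-encoder head above is pooled CLIP features run through a transformer mapper, a LayerNorm, and a Linear projection. A shape-level sketch of the norm-plus-projection tail with dummy sizes (the mapper block is omitted here):

import torch
from torch import nn

hidden, proj = 32, 16                       # hypothetical sizes
pooled = torch.randn(2, hidden)             # stand-in for CLIP pooler_output
latents = pooled[:, None]                   # add a sequence axis: (2, 1, hidden)
head = nn.Sequential(nn.LayerNorm(hidden), nn.Linear(hidden, proj))
print(head(latents).shape)                  # torch.Size([2, 1, 16])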
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def UpperCamelCase_ ( a_ ) ->Tuple: A =FileLock(str(tmpdir / "foo.lock" ) ) A =FileLock(str(tmpdir / "foo.lock" ) ) A =0.01 with locka.acquire(): with pytest.raises(a_ ): A =time.time() locka.acquire(a_ ) assert time.time() - _start > timeout def UpperCamelCase_ ( a_ ) ->List[Any]: A ="a" * 1000 + ".lock" A =FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(".lock" ) assert not locka._lock_file.endswith(a_ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 A =FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(a_ ): locka.acquire(0 )
689
1
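The tests above exercise two properties of the vendored file lock: a second FileLock on the same path cannot be acquired while the first is held, and acquire(timeout=...) raises Timeout instead of blocking. A minimal usage sketch with the same import as the test (newer standalone filelock releases make same-process locks reentrant, so behavior there may differ):

from datasets.utils.filelock import FileLock, Timeout

lock_a = FileLock("demo.lock")
lock_b = FileLock("demo.lock")
with lock_a.acquire():
    try:
        lock_b.acquire(timeout=0.01)   # already held -> raises Timeout
    except Timeout:
        print("lock is busy, as expected")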
from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = ["image_processor"] _A = "SamImageProcessor" def __init__( self : Optional[Any] , snake_case__ : Optional[Any] ): """simple docstring""" super().__init__(snake_case__ ) A =self.image_processor A =-10 A =self.image_processor.size["longest_edge"] def __call__( self : Union[str, Any] , snake_case__ : Optional[int]=None , snake_case__ : Optional[Any]=None , snake_case__ : List[Any]=None , snake_case__ : Tuple=None , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : Dict , ): """simple docstring""" A =self.image_processor( snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) # pop arguments that are not used in the foward but used nevertheless A =encoding_image_processor["original_sizes"] if hasattr(snake_case__ , "numpy" ): # Checks if Torch or TF tensor A =original_sizes.numpy() A , A , A =self._check_and_preprocess_points( input_points=snake_case__ , input_labels=snake_case__ , input_boxes=snake_case__ , ) A =self._normalize_and_convert( snake_case__ , snake_case__ , input_points=snake_case__ , input_labels=snake_case__ , input_boxes=snake_case__ , return_tensors=snake_case__ , ) return encoding_image_processor def _a ( self : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any]=None , snake_case__ : List[Any]=None , snake_case__ : int=None , snake_case__ : Dict="pt" , ): """simple docstring""" if input_points is not None: if len(snake_case__ ) != len(snake_case__ ): A =[ self._normalize_coordinates(self.target_size , snake_case__ , original_sizes[0] ) for point in input_points ] else: A =[ self._normalize_coordinates(self.target_size , snake_case__ , snake_case__ ) for point, original_size in zip(snake_case__ , snake_case__ ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: A , A =self._pad_points_and_labels(snake_case__ , snake_case__ ) A =np.array(snake_case__ ) if input_labels is not None: A =np.array(snake_case__ ) if input_boxes is not None: if len(snake_case__ ) != len(snake_case__ ): A =[ self._normalize_coordinates(self.target_size , snake_case__ , original_sizes[0] , is_bounding_box=snake_case__ ) for box in input_boxes ] else: A =[ self._normalize_coordinates(self.target_size , snake_case__ , snake_case__ , is_bounding_box=snake_case__ ) for box, original_size in zip(snake_case__ , snake_case__ ) ] A =np.array(snake_case__ ) if input_boxes is not None: if return_tensors == "pt": A =torch.from_numpy(snake_case__ ) # boxes batch size of 1 by default A =input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": A =tf.convert_to_tensor(snake_case__ ) # boxes batch size of 1 by default A =tf.expand_dims(snake_case__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({"input_boxes": input_boxes} ) if input_points is not None: if return_tensors == "pt": A =torch.from_numpy(snake_case__ ) # point batch size of 1 by default A =input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": A 
=tf.convert_to_tensor(snake_case__ ) # point batch size of 1 by default A =tf.expand_dims(snake_case__ , 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({"input_points": input_points} ) if input_labels is not None: if return_tensors == "pt": A =torch.from_numpy(snake_case__ ) # point batch size of 1 by default A =input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": A =tf.convert_to_tensor(snake_case__ ) # point batch size of 1 by default A =tf.expand_dims(snake_case__ , 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({"input_labels": input_labels} ) return encoding_image_processor def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Dict ): """simple docstring""" A =max([point.shape[0] for point in input_points] ) A =[] for i, point in enumerate(snake_case__ ): if point.shape[0] != expected_nb_points: A =np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 ) A =np.append(input_labels[i] , [self.point_pad_value] ) processed_input_points.append(snake_case__ ) A =processed_input_points return input_points, input_labels def _a ( self : Optional[Any] , snake_case__ : int , snake_case__ : np.ndarray , snake_case__ : List[Any] , snake_case__ : Optional[Any]=False ): """simple docstring""" A , A =original_size A , A =self.image_processor._get_preprocess_shape(snake_case__ , longest_edge=snake_case__ ) A =deepcopy(snake_case__ ).astype(snake_case__ ) if is_bounding_box: A =coords.reshape(-1 , 2 , 2 ) A =coords[..., 0] * (new_w / old_w) A =coords[..., 1] * (new_h / old_h) if is_bounding_box: A =coords.reshape(-1 , 4 ) return coords def _a ( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : Dict=None , snake_case__ : Optional[Any]=None , ): """simple docstring""" if input_points is not None: if hasattr(snake_case__ , "numpy" ): # Checks for TF or Torch tensor A =input_points.numpy().tolist() if not isinstance(snake_case__ , snake_case__ ) or not isinstance(input_points[0] , snake_case__ ): raise ValueError("Input points must be a list of list of floating points." ) A =[np.array(snake_case__ ) for input_point in input_points] else: A =None if input_labels is not None: if hasattr(snake_case__ , "numpy" ): A =input_labels.numpy().tolist() if not isinstance(snake_case__ , snake_case__ ) or not isinstance(input_labels[0] , snake_case__ ): raise ValueError("Input labels must be a list of list integers." ) A =[np.array(snake_case__ ) for label in input_labels] else: A =None if input_boxes is not None: if hasattr(snake_case__ , "numpy" ): A =input_boxes.numpy().tolist() if ( not isinstance(snake_case__ , snake_case__ ) or not isinstance(input_boxes[0] , snake_case__ ) or not isinstance(input_boxes[0][0] , snake_case__ ) ): raise ValueError("Input boxes must be a list of list of list of floating points." ) A =[np.array(snake_case__ ).astype(np.floataa ) for box in input_boxes] else: A =None return input_points, input_labels, input_boxes @property def _a ( self : Optional[Any] ): """simple docstring""" A =self.image_processor.model_input_names return list(dict.fromkeys(snake_case__ ) ) def _a ( self : List[Any] , *snake_case__ : Optional[Any] , **snake_case__ : Union[str, Any] ): """simple docstring""" return self.image_processor.post_process_masks(*snake_case__ , **snake_case__ )
689
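The core geometry in the _normalize_coordinates method above is rescaling point (and box) coordinates from the original image size to the model's longest-edge target size. A numpy-only sketch; the rounding mirrors the half-up rounding the image processor appears to use.

import numpy as np

def rescale_points(points, original_size, longest_edge=1024):
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    out = np.asarray(points, dtype=np.float64).copy()
    out[..., 0] *= new_w / old_w   # x coordinates
    out[..., 1] *= new_h / old_h   # y coordinates
    return out

print(rescale_points([[256, 128]], original_size=(512, 512)))  # [[512. 256.]]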
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { """configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""], """tokenization_roformer""": ["""RoFormerTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""RoFormerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """RoFormerForCausalLM""", """RoFormerForMaskedLM""", """RoFormerForMultipleChoice""", """RoFormerForQuestionAnswering""", """RoFormerForSequenceClassification""", """RoFormerForTokenClassification""", """RoFormerLayer""", """RoFormerModel""", """RoFormerPreTrainedModel""", """load_tf_weights_in_roformer""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRoFormerForCausalLM""", """TFRoFormerForMaskedLM""", """TFRoFormerForMultipleChoice""", """TFRoFormerForQuestionAnswering""", """TFRoFormerForSequenceClassification""", """TFRoFormerForTokenClassification""", """TFRoFormerLayer""", """TFRoFormerModel""", """TFRoFormerPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxRoFormerForMaskedLM""", """FlaxRoFormerForMultipleChoice""", """FlaxRoFormerForQuestionAnswering""", """FlaxRoFormerForSequenceClassification""", """FlaxRoFormerForTokenClassification""", """FlaxRoFormerModel""", """FlaxRoFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, 
FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
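The RoFormer __init__ row above uses transformers' internal _LazyModule helper so heavy submodules load only when first accessed. What that boils down to, in plain stdlib form, is a PEP 562 module __getattr__; a rough sketch of the pattern, meant to live in a package __init__.py rather than a script (submodule name here is illustrative):

import importlib

_lazy = {"RoFormerConfig": ".configuration_roformer"}  # name -> submodule

def __getattr__(name):
    if name in _lazy:
        module = importlib.import_module(_lazy[name], __package__)
        return getattr(module, name)   # import deferred until first access
    raise AttributeError(name)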
from __future__ import annotations import math def UpperCamelCase_ ( a_ , a_ ) ->float: A =u for i in range(1 , a_ ): A =temp * (u - i) return temp def UpperCamelCase_ ( ) ->None: A =int(input("enter the numbers of values: " ) ) A =[] for _ in range(a_ ): y.append([] ) for i in range(a_ ): for j in range(a_ ): y[i].append(a_ ) A =0 print("enter the values of parameters in a list: " ) A =list(map(a_ , input().split() ) ) print("enter the values of corresponding parameters: " ) for i in range(a_ ): A =float(input() ) A =int(input("enter the value to interpolate: " ) ) A =(value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , a_ ): for j in range(n - i ): A =y[j + 1][i - 1] - y[j][i - 1] A =y[0][0] for i in range(1 , a_ ): summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ ) print(f'''the value at {value} is {summ}''' ) if __name__ == "__main__": main()
689
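The interactive script above implements Newton's forward-difference interpolation: build the forward-difference table, then evaluate y0 plus the sum of u(u-1)...(u-k+1) * Δ^k y0 / k! with u = (x - x0) / h. A self-contained, non-interactive version:

import math

def newton_forward(x_vals, y_vals, x):
    n = len(x_vals)
    table = [y_vals[:]]                      # row 0: the y values
    for k in range(1, n):                    # row k: k-th forward differences
        prev = table[-1]
        table.append([prev[i + 1] - prev[i] for i in range(n - k)])
    u = (x - x_vals[0]) / (x_vals[1] - x_vals[0])
    total, u_term = table[0][0], 1.0
    for k in range(1, n):
        u_term *= u - (k - 1)                # u (u-1) ... (u-k+1)
        total += u_term * table[k][0] / math.factorial(k)
    return total

# y = x^2 on equally spaced points; interpolating at 2.5 recovers 6.25
print(newton_forward([1, 2, 3, 4], [1, 4, 9, 16], 2.5))  # 6.25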
import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets __a = """\ @inproceedings{popovic-2015-chrf, title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\", author = \"Popovi{\'c}, Maja\", booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\", month = sep, year = \"2015\", address = \"Lisbon, Portugal\", publisher = \"Association for Computational Linguistics\", url = \"https://aclanthology.org/W15-3049\", doi = \"10.18653/v1/W15-3049\", pages = \"392--395\", } @inproceedings{popovic-2017-chrf, title = \"chr{F}++: words helping character n-grams\", author = \"Popovi{\'c}, Maja\", booktitle = \"Proceedings of the Second Conference on Machine Translation\", month = sep, year = \"2017\", address = \"Copenhagen, Denmark\", publisher = \"Association for Computational Linguistics\", url = \"https://aclanthology.org/W17-4770\", doi = \"10.18653/v1/W17-4770\", pages = \"612--618\", } @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __a = """\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. """ __a = """ Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: 'score' (float): The chrF (chrF++) score, 'char_order' (int): The character n-gram order, 'word_order' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, 'beta' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... lowercase=True) >>> print(results) {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__( datasets.Metric ): """simple docstring""" def _a ( self : Any ): """simple docstring""" if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[ "https://github.com/m-popovic/chrF", ] , ) def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ): """simple docstring""" A =len(references[0] ) if any(len(snake_case__ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A =[[refs[i] for refs in references] for i in range(snake_case__ )] A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A =sb_chrf.corpus_score(snake_case__ , snake_case__ ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
689
1
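The metric wrapper above ultimately delegates to sacrebleu's CHRF class; word_order=2 selects chrF++. A small direct-usage sketch showing the transposed reference layout the wrapper converts to (one list per reference stream, not per prediction):

from sacrebleu import CHRF

predictions = ["The cat sat on the mat."]
references = [["The cat is sitting on the mat."]]   # one ref list per prediction

chrf = CHRF(word_order=2)                           # chrF++
transposed = [[refs[i] for refs in references] for i in range(len(references[0]))]
score = chrf.corpus_score(predictions, transposed)
print(round(score.score, 2))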
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __a = """true""" def UpperCamelCase_ ( a_ , a_=82 , a_=16 ) ->Tuple: set_seed(42 ) A =RegressionModel() A =deepcopy(a_ ) A =RegressionDataset(length=a_ ) A =DataLoader(a_ , batch_size=a_ ) model.to(accelerator.device ) A , A =accelerator.prepare(a_ , a_ ) return model, ddp_model, dataloader def UpperCamelCase_ ( a_ , a_=False ) ->Dict: A =AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) A =load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(a_ ): A =tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a_ , max_length=a_ ) return outputs with accelerator.main_process_first(): A =dataset.map( a_ , batched=a_ , remove_columns=["idx", "sentence1", "sentence2"] , ) A =tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(a_ ): if use_longest: return tokenizer.pad(a_ , padding="longest" , return_tensors="pt" ) return tokenizer.pad(a_ , padding="max_length" , max_length=128 , return_tensors="pt" ) return DataLoader(a_ , shuffle=a_ , collate_fn=a_ , batch_size=16 ) def UpperCamelCase_ ( a_ , a_ ) ->Optional[Any]: A =Accelerator(dispatch_batches=a_ , split_batches=a_ ) A =get_dataloader(a_ , not dispatch_batches ) A =AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=a_ ) A , A =accelerator.prepare(a_ , a_ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def UpperCamelCase_ ( a_ , a_ , a_ ) ->Optional[int]: A =[] for batch in dataloader: A , A =batch.values() with torch.no_grad(): A =model(a_ ) A , A =accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) A , A =[], [] for logit, targ in logits_and_targets: logits.append(a_ ) targs.append(a_ ) A , A =torch.cat(a_ ), torch.cat(a_ ) return logits, targs def UpperCamelCase_ ( a_ , a_=82 , a_=False , a_=False , a_=16 ) ->List[Any]: A , A , A =get_basic_setup(a_ , a_ , a_ ) A , A =generate_predictions(a_ , a_ , a_ ) assert ( len(a_ ) == num_samples ), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(a_ )}''' def UpperCamelCase_ ( a_ = False , a_ = False ) ->Union[str, Any]: A =evaluate.load("glue" , "mrpc" ) A , A =get_mrpc_setup(a_ , a_ ) # First do baseline A , A , A =setup["no"] model.to(a_ ) model.eval() for batch in dataloader: batch.to(a_ ) with torch.inference_mode(): A =model(**a_ ) A =outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=a_ , references=batch["labels"] ) A =metric.compute() # Then do distributed A , A , A =setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): A =model(**a_ ) A =outputs.logits.argmax(dim=-1 ) A =batch["labels"] A , A =accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=a_ , references=a_ ) A =metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def UpperCamelCase_ ( ) ->Optional[Any]: A 
=Accelerator(split_batches=a_ , dispatch_batches=a_ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(a_ , a_ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: A =Accelerator(split_batches=a_ , dispatch_batches=a_ ) if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) test_torch_metrics(a_ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) A =Accelerator() test_torch_metrics(a_ , 512 ) accelerator.state._reset_state() def UpperCamelCase_ ( a_ ) ->Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
689
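The evaluation pattern the script above tests is worth seeing in isolation: iterate the sharded dataloader, then let accelerator.gather_for_metrics collect predictions from every process while dropping the duplicated samples that pad the last batch. A minimal shape of that loop, assuming model and dataloader were already passed through accelerator.prepare:

import torch

def evaluate(model, dataloader, accelerator, metric):
    model.eval()
    for batch in dataloader:
        with torch.no_grad():
            logits = model(**batch).logits
        preds = logits.argmax(dim=-1)
        preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
        metric.add_batch(predictions=preds, references=refs)
    return metric.compute()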
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __a = { """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTBigCodeForSequenceClassification""", """GPTBigCodeForTokenClassification""", """GPTBigCodeForCausalLM""", """GPTBigCodeModel""", """GPTBigCodePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class UpperCamelCase__: """simple docstring""" def __init__( self : int , snake_case__ : List[Any] , snake_case__ : List[str]=13 , snake_case__ : Any=7 , snake_case__ : Tuple=True , snake_case__ : Union[str, Any]=True , snake_case__ : str=True , snake_case__ : Optional[int]=True , snake_case__ : str=99 , snake_case__ : int=[1, 1, 2] , snake_case__ : str=1 , snake_case__ : Union[str, Any]=32 , snake_case__ : List[str]=4 , snake_case__ : Optional[int]=8 , snake_case__ : int=37 , snake_case__ : Optional[Any]="gelu_new" , snake_case__ : Dict=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Dict=0.0 , snake_case__ : str=5_12 , snake_case__ : Optional[int]=3 , snake_case__ : str=0.02 , snake_case__ : Any=3 , snake_case__ : Any=4 , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[int]=False , ): """simple docstring""" A =parent A =batch_size A =seq_length A =is_training A =use_input_mask A =use_token_type_ids A =use_labels A =vocab_size A =block_sizes A =num_decoder_layers A =d_model A =n_head A =d_head A =d_inner A =hidden_act A =hidden_dropout A =attention_dropout A =activation_dropout A =max_position_embeddings A =type_vocab_size A =2 A =num_labels A =num_choices A =scope A =initializer_std # Used in the tests to check the size of the first attention layer A =n_head # Used in the tests to check the size of the first hidden state A =self.d_model # Used in the tests to check the number of output hidden states/attentions A =sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: A =self.num_hidden_layers + 2 def _a ( self : Optional[int] ): """simple docstring""" A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A =None if self.use_input_mask: A =random_attention_mask([self.batch_size, self.seq_length] ) A =None if self.use_token_type_ids: A =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A =None A =None A =None if self.use_labels: A =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A =ids_tensor([self.batch_size] , self.num_choices ) A =FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def _a ( self : Optional[int] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Union[str, Any] , ): """simple docstring""" A =TFFunnelModel(config=snake_case__ ) A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A =model(snake_case__ ) A =[input_ids, input_mask] A =model(snake_case__ ) A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) A =False A =TFFunnelModel(config=snake_case__ ) A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) A =False A =TFFunnelModel(config=snake_case__ ) A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def _a ( self : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : str , ): """simple docstring""" A =TFFunnelBaseModel(config=snake_case__ ) A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A =model(snake_case__ ) A =[input_ids, input_mask] A =model(snake_case__ ) A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) A =False A =TFFunnelBaseModel(config=snake_case__ ) A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) A =False A =TFFunnelBaseModel(config=snake_case__ ) A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def _a ( self : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str , ): """simple docstring""" A =TFFunnelForPreTraining(config=snake_case__ ) A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def _a ( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : 
int , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[int] , ): """simple docstring""" A =TFFunnelForMaskedLM(config=snake_case__ ) A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Tuple , ): """simple docstring""" A =self.num_labels A =TFFunnelForSequenceClassification(config=snake_case__ ) A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : str , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[int] , ): """simple docstring""" A =self.num_choices A =TFFunnelForMultipleChoice(config=snake_case__ ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A ={ "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _a ( self : Tuple , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : int , ): """simple docstring""" A =self.num_labels A =TFFunnelForTokenClassification(config=snake_case__ ) A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a ( self : Dict , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : str , ): """simple docstring""" A =TFFunnelForQuestionAnswering(config=snake_case__ ) A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A =model(snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _a ( self : str ): """simple docstring""" A =self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) =config_and_inputs A ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) _A = ( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": 
TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) _A = False _A = False def _a ( self : Optional[int] ): """simple docstring""" A =TFFunnelModelTester(self ) A =ConfigTester(self , config_class=snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : Any ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _a ( self : Dict ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def _a ( self : Union[str, Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def _a ( self : Optional[int] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case__ ) def _a ( self : Tuple ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) @require_tf class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) _A = False _A = False def _a ( self : Union[str, Any] ): """simple docstring""" A =TFFunnelModelTester(self , base=snake_case__ ) A =ConfigTester(self , config_class=snake_case__ ) def _a ( self : Dict ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : Any ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*snake_case__ ) def _a ( self : str ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def _a ( self : str ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
689
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class UpperCamelCase__: """simple docstring""" def __init__( self : List[str] , snake_case__ : Optional[int] , ): """simple docstring""" A =parent A =13 A =7 A =True A =True A =True A =True A =True A =False A =False A =False A =2 A =99 A =0 A =32 A =2 A =4 A =0.1 A =0.1 A =5_12 A =16 A =2 A =0.02 A =3 A =4 A ="last" A =True A =None A =0 def _a ( self : Optional[Any] ): """simple docstring""" A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) A =None if self.use_input_lengths: A =( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A =None if self.use_token_type_ids: A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A =None A =None A =None if self.use_labels: A =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) A =ids_tensor([self.batch_size] , self.num_choices ) A =FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ): """simple docstring""" A =TFFlaubertModel(config=snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} A =model(snake_case__ ) A =[input_ids, input_mask] A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ): """simple docstring""" A =TFFlaubertWithLMHeadModel(snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} A 
=model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , ): """simple docstring""" A =TFFlaubertForQuestionAnsweringSimple(snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths} A =model(snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , ): """simple docstring""" A =TFFlaubertForSequenceClassification(snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths} A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , ): """simple docstring""" A =self.num_labels A =TFFlaubertForTokenClassification(config=snake_case__ ) A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , ): """simple docstring""" A =self.num_choices A =TFFlaubertForMultipleChoice(config=snake_case__ ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A ={ "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _a ( self : Any ): """simple docstring""" A =self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) =config_and_inputs A ={ "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) _A = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _A = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, 
"question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) _A = False _A = False def _a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _a ( self : Optional[int] ): """simple docstring""" A =TFFlaubertModelTester(self ) A =ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def _a ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : str ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def _a ( self : Union[str, Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ ) def _a ( self : Tuple ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ ) @slow def _a ( self : Tuple ): """simple docstring""" for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A =TFFlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_tf @require_sentencepiece @require_tokenizers class UpperCamelCase__( unittest.TestCase ): """simple docstring""" @slow def _a ( self : Tuple ): """simple docstring""" A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) A =tf.convert_to_tensor( [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" A =model(snake_case__ )[0] A =tf.TensorShape((1, 8, 5_12) ) self.assertEqual(output.shape , snake_case__ ) # compare the actual values for a slice. A =tf.convert_to_tensor( [ [ [-1.8_768_773, -1.566_555, 0.27_072_418], [-1.6_920_038, -0.5_873_505, 1.9_329_599], [-2.9_563_985, -1.6_993_835, 1.7_972_052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
689
1
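# Editor's sketch (not part of the dataset row above): the shape contract the
# Flaubert tester asserts, shown end-to-end on a deliberately tiny config.
# Assumes TensorFlow and transformers are installed; hyperparameter values here
# are illustrative, not the checkpoint's.
import tensorflow as tf
from transformers import FlaubertConfig, TFFlaubertModel

tiny_config = FlaubertConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4)
tiny_model = TFFlaubertModel(tiny_config)
input_ids = tf.random.uniform((2, 7), maxval=tiny_config.vocab_size, dtype=tf.int32)
outputs = tiny_model(input_ids)
assert outputs.last_hidden_state.shape == (2, 7, tiny_config.emb_dim)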
import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCamelCase__( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _a ( self : Tuple ): """simple docstring""" torch.manual_seed(0 ) A =UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return model @property def _a ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) A =UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , ) return model @property def _a ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) A =AutoencoderKL( sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , ) A =UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return vqvae, unet @slow def _a ( self : int ): """simple docstring""" A ="cpu" # ensure determinism for the device-dependent torch.Generator A =Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) A =DDPMScheduler() A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ ) A =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) A =torch.Generator(device=snake_case__ ).manual_seed(42 ) A =pipe(generator=snake_case__ , steps=4 ) A =output.audios[0] A =output.images[0] A =torch.Generator(device=snake_case__ ).manual_seed(42 ) A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ ) A =output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10] A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10] A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 A =Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) A =DDIMScheduler() A =self.dummy_vqvae_and_unet A =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ ) A =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) np.random.seed(0 ) A =np.random.uniform(-1 , 1 , 
((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) A =torch.Generator(device=snake_case__ ).manual_seed(42 ) A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 ) A =output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10] A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 A =self.dummy_unet_condition A =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ ) A =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) np.random.seed(0 ) A =torch.rand((1, 1, 10) ) A =pipe(generator=snake_case__ , encoding=snake_case__ ) A =output.images[0] A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10] A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class UpperCamelCase__( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Union[str, Any] ): """simple docstring""" A =torch_device A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" ) A =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) A =torch.Generator(device=snake_case__ ).manual_seed(42 ) A =pipe(generator=snake_case__ ) A =output.audios[0] A =output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10] A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
689
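# Editor's note (sketch): the audio-length assertions in the pipeline test above
# all reduce to the same arithmetic — a spectrogram of width W spans
# (W - 1) * hop_length raw samples. hop_length = 512 is an assumption matching
# the usual Mel default; the width matches the dummy UNet's sample_size[1].
hop_length = 512
spectrogram_width = 64
assert (spectrogram_width - 1) * hop_length == 32256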
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Depth-first backtracking: at depth len(sequence) a full permutation has
    been built and is printed; the used-flags prevent re-picking an element."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
689
1
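# Editor's sketch: collecting the same depth-first enumeration instead of
# printing it shows the backtracking above visits exactly the n! orderings.
from itertools import permutations

def collect_permutations(sequence):
    results, used = [], [False] * len(sequence)

    def visit(current):
        if len(current) == len(sequence):
            results.append(tuple(current))
            return
        for i in range(len(sequence)):
            if not used[i]:
                used[i] = True
                current.append(sequence[i])
                visit(current)
                current.pop()
                used[i] = False

    visit([])
    return results

assert sorted(collect_permutations([3, 1, 2])) == sorted(permutations([3, 1, 2]))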
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = RoCBertTokenizer _A = None _A = False _A = True _A = filter_non_english def _a ( self : Any ): """simple docstring""" super().setUp() A =["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] A ={} A ={} for i, value in enumerate(snake_case__ ): A =i A =i A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(snake_case__ , snake_case__ , ensure_ascii=snake_case__ ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(snake_case__ , snake_case__ , ensure_ascii=snake_case__ ) def _a ( self : str ): """simple docstring""" A =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) A =tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(snake_case__ , ["你", "好", "[SEP]", "你", "是", "谁"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(snake_case__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(snake_case__ ) , [5, 6, 2, 5, 7, 8] ) def _a ( self : List[str] ): """simple docstring""" A =RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def _a ( self : Optional[int] ): """simple docstring""" A =RoCBertBasicTokenizer(do_lower_case=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _a ( self : List[str] ): """simple docstring""" A =RoCBertBasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def _a ( self : Optional[int] ): """simple docstring""" A =RoCBertBasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _a ( self : Optional[Any] ): """simple docstring""" A =RoCBertBasicTokenizer(do_lower_case=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _a ( self : Tuple ): """simple docstring""" A =RoCBertBasicTokenizer(do_lower_case=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def _a ( self : Dict ): """simple docstring""" A =RoCBertBasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def _a ( self : Optional[int] ): """simple docstring""" A =RoCBertBasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def _a ( self : int ): """simple docstring""" A =RoCBertBasicTokenizer(do_lower_case=snake_case__ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def _a ( self : Optional[Any] ): """simple docstring""" A =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] A ={} for i, token in enumerate(snake_case__ ): A =i A =RoCBertWordpieceTokenizer(vocab=snake_case__ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def _a ( self : Optional[Any] ): """simple docstring""" self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def _a ( self : List[str] ): """simple docstring""" self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def _a ( self : Union[str, Any] ): """simple docstring""" self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def _a ( self : Tuple ): """simple docstring""" A =self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(snake_case__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: A =self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(snake_case__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def _a ( self : List[Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A =self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) A =f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' A =tokenizer_r.encode_plus( snake_case__ , return_attention_mask=snake_case__ , return_token_type_ids=snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ , ) A =tokenizer_r.do_lower_case if hasattr(snake_case__ , "do_lower_case" ) else False A =( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def _a ( self : Union[str, Any] ): """simple docstring""" A =["的", "人", "有"] A ="".join(snake_case__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A =True A =self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) A =self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) A =tokenizer_p.encode(snake_case__ , add_special_tokens=snake_case__ ) A =tokenizer_r.encode(snake_case__ , add_special_tokens=snake_case__ ) A =tokenizer_r.convert_ids_to_tokens(snake_case__ ) A =tokenizer_p.convert_ids_to_tokens(snake_case__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(snake_case__ , snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) A =False A =self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) A =self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) A =tokenizer_r.encode(snake_case__ , add_special_tokens=snake_case__ ) A =tokenizer_p.encode(snake_case__ , add_special_tokens=snake_case__ ) A =tokenizer_r.convert_ids_to_tokens(snake_case__ ) A =tokenizer_p.convert_ids_to_tokens(snake_case__ ) # it is expected that only the first Chinese character is not preceded by "##". 
A =[ f'''##{token}''' if idx != 0 else token for idx, token in enumerate(snake_case__ ) ] self.assertListEqual(snake_case__ , snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) @slow def _a ( self : Tuple ): """simple docstring""" A =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) A =tokenizer.encode("你好" , add_special_tokens=snake_case__ ) A =tokenizer.encode("你是谁" , add_special_tokens=snake_case__ ) A =tokenizer.build_inputs_with_special_tokens(snake_case__ ) A =tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def _a ( self : Dict ): """simple docstring""" A =self.get_tokenizers(do_lower_case=snake_case__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): A ="你好,你是谁" A =tokenizer.tokenize(snake_case__ ) A =tokenizer.convert_tokens_to_ids(snake_case__ ) A =tokenizer.convert_tokens_to_shape_ids(snake_case__ ) A =tokenizer.convert_tokens_to_pronunciation_ids(snake_case__ ) A =tokenizer.prepare_for_model( snake_case__ , snake_case__ , snake_case__ , add_special_tokens=snake_case__ ) A =tokenizer.encode_plus(snake_case__ , add_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ )
689
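# Editor's sketch of the greedy longest-match-first loop behind the wordpiece
# expectations tested above: take the longest vocab entry at each position
# ("##" marks a continuation piece), and give up with [UNK] when a word cannot
# be fully covered.
def greedy_wordpiece(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, current = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            return [unk]
        tokens.append(current)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert greedy_wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]
assert greedy_wordpiece("unwantedX", vocab) == ["[UNK]"]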
1
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def UpperCamelCase_ ( a_ , a_ , a_ ) ->Dict: A =os.path.abspath(a_ ) logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' ) # Load weights from TF model A =tf.train.list_variables(a_ ) A =[] A =[] A =[] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") A =full_name.split("/" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f'''Skipping non-model layer {full_name}''' ) continue if "optimizer" in full_name: logger.info(f'''Skipping optimization layer {full_name}''' ) continue if name[0] == "model": # ignore initial 'model' A =name[1:] # figure out how many levels deep the name is A =0 for _name in name: if _name.startswith("layer_with_weights" ): depth += 1 else: break layer_depth.append(a_ ) # read data A =tf.train.load_variable(a_ , a_ ) names.append("/".join(a_ ) ) arrays.append(a_ ) logger.info(f'''Read a total of {len(a_ ):,} layers''' ) # Sanity check if len(set(a_ ) ) != 1: raise ValueError(f'''Found layer names with different depths (layer depth {list(set(a_ ) )})''' ) A =list(set(a_ ) )[0] if layer_depth != 1: raise ValueError( "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP" " heads." ) # convert layers logger.info("Converting weights..." ) for full_name, array in zip(a_ , a_ ): A =full_name.split("/" ) A =model A =[] for i, m_name in enumerate(a_ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("layer_with_weights" ): A =int(m_name.split("-" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["embeddings", "LayerNorm"] ) A =getattr(a_ , "embeddings" ) A =getattr(a_ , "LayerNorm" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["encoder", "layer", str(layer_num - 4 )] ) A =getattr(a_ , "encoder" ) A =getattr(a_ , "layer" ) A =pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["pooler", "dense"] ) A =getattr(a_ , "pooler" ) A =getattr(a_ , "dense" ) elif m_name == "embeddings": trace.append("embeddings" ) A =getattr(a_ , "embeddings" ) if layer_num == 0: trace.append("word_embeddings" ) A =getattr(a_ , "word_embeddings" ) elif layer_num == 1: trace.append("position_embeddings" ) A =getattr(a_ , "position_embeddings" ) elif layer_num == 2: trace.append("token_type_embeddings" ) A =getattr(a_ , "token_type_embeddings" ) else: raise ValueError(f'''Unknown embedding layer with name {full_name}''' ) trace.append("weight" ) A =getattr(a_ , "weight" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["attention", "self"] ) A =getattr(a_ , "attention" ) A =getattr(a_ , "self" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["attention", "output", "LayerNorm"] ) A =getattr(a_ , "attention" ) A =getattr(a_ , "output" ) A =getattr(a_ , "LayerNorm" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["attention", "output", "dense"] ) A =getattr(a_ , "attention" ) A =getattr(a_ , "output" ) A =getattr(a_ , 
"dense" ) elif m_name == "_output_dense": # output dense trace.extend(["output", "dense"] ) A =getattr(a_ , "output" ) A =getattr(a_ , "dense" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["output", "LayerNorm"] ) A =getattr(a_ , "output" ) A =getattr(a_ , "LayerNorm" ) elif m_name == "_key_dense": # attention key trace.append("key" ) A =getattr(a_ , "key" ) elif m_name == "_query_dense": # attention query trace.append("query" ) A =getattr(a_ , "query" ) elif m_name == "_value_dense": # attention value trace.append("value" ) A =getattr(a_ , "value" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["intermediate", "dense"] ) A =getattr(a_ , "intermediate" ) A =getattr(a_ , "dense" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("output" ) A =getattr(a_ , "output" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("bias" ) A =getattr(a_ , "bias" ) elif m_name in ["kernel", "gamma"]: trace.append("weight" ) A =getattr(a_ , "weight" ) else: logger.warning(f'''Ignored {m_name}''' ) # for certain layers reshape is necessary A =".".join(a_ ) if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , a_ ) or re.match( R"(\S+)\.attention\.output\.dense\.weight" , a_ ): A =array.reshape(pointer.data.shape ) if "kernel" in full_name: A =array.transpose() if pointer.shape == array.shape: A =torch.from_numpy(a_ ) else: raise ValueError( f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:''' f''' {array.shape}''' ) logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' ) return model def UpperCamelCase_ ( a_ , a_ , a_ ) ->List[Any]: # Instantiate model logger.info(f'''Loading model based on config from {config_path}...''' ) A =BertConfig.from_json_file(a_ ) A =BertModel(a_ ) # Load weights from checkpoint logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' ) load_tfa_weights_in_bert(a_ , a_ , a_ ) # Save pytorch-model logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' ) torch.save(model.state_dict() , a_ ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model (must include filename).""", ) __a = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
689
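# Editor's check (sketch) of the two regexes used in the reshape branch above.
# re.match anchors at the start of the string, so only Q/K/V self-attention
# weights and the attention output dense weight trigger the reshape/transpose
# before the TF kernel is copied into the PyTorch parameter.
import re

qkv = r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)"
out = r"(\S+)\.attention\.output\.dense\.weight"
name = "encoder.layer.0.attention.self.query.weight"
assert re.match(qkv, name) and not re.match(out, name)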
1
import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin __a = logging.get_logger(__name__) enable_full_determinism() class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = UNetaDModel _A = "sample" @property def _a ( self : Optional[int] ): """simple docstring""" A =4 A =3 A =(32, 32) A =floats_tensor((batch_size, num_channels) + sizes ).to(snake_case__ ) A =torch.tensor([10] ).to(snake_case__ ) return {"sample": noise, "timestep": time_step} @property def _a ( self : List[str] ): """simple docstring""" return (3, 32, 32) @property def _a ( self : List[str] ): """simple docstring""" return (3, 32, 32) def _a ( self : Any ): """simple docstring""" A ={ "block_out_channels": (32, 64), "down_block_types": ("DownBlock2D", "AttnDownBlock2D"), "up_block_types": ("AttnUpBlock2D", "UpBlock2D"), "attention_head_dim": 3, "out_channels": 3, "in_channels": 3, "layers_per_block": 2, "sample_size": 32, } A =self.dummy_input return init_dict, inputs_dict class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = UNetaDModel _A = "sample" @property def _a ( self : str ): """simple docstring""" A =4 A =4 A =(32, 32) A =floats_tensor((batch_size, num_channels) + sizes ).to(snake_case__ ) A =torch.tensor([10] ).to(snake_case__ ) return {"sample": noise, "timestep": time_step} @property def _a ( self : List[Any] ): """simple docstring""" return (4, 32, 32) @property def _a ( self : Tuple ): """simple docstring""" return (4, 32, 32) def _a ( self : List[str] ): """simple docstring""" A ={ "sample_size": 32, "in_channels": 4, "out_channels": 4, "layers_per_block": 2, "block_out_channels": (32, 64), "attention_head_dim": 32, "down_block_types": ("DownBlock2D", "DownBlock2D"), "up_block_types": ("UpBlock2D", "UpBlock2D"), } A =self.dummy_input return init_dict, inputs_dict def _a ( self : List[Any] ): """simple docstring""" A , A =UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(snake_case__ ) A =model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" ) def _a ( self : List[str] ): """simple docstring""" A , A =UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=snake_case__ ) model.to(snake_case__ ) A =model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" ) def _a ( self : int ): """simple docstring""" A , A =UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=snake_case__ ) model_accelerate.to(snake_case__ ) model_accelerate.eval() A =torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) A =noise.to(snake_case__ ) A =torch.tensor([10] * noise.shape[0] ).to(snake_case__ ) A =model_accelerate(snake_case__ , snake_case__ )["sample"] # two models don't need to stay in the device at the same time del model_accelerate 
torch.cuda.empty_cache() gc.collect() A , A =UNetaDModel.from_pretrained( "fusing/unet-ldm-dummy-update" , output_loading_info=snake_case__ , low_cpu_mem_usage=snake_case__ ) model_normal_load.to(snake_case__ ) model_normal_load.eval() A =model_normal_load(snake_case__ , snake_case__ )["sample"] assert torch_all_close(snake_case__ , snake_case__ , rtol=1E-3 ) def _a ( self : str ): """simple docstring""" A =UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" ) model.eval() model.to(snake_case__ ) A =torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) A =noise.to(snake_case__ ) A =torch.tensor([10] * noise.shape[0] ).to(snake_case__ ) with torch.no_grad(): A =model(snake_case__ , snake_case__ ).sample A =output[0, -1, -3:, -3:].flatten().cpu() # fmt: off A =torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(snake_case__ , snake_case__ , rtol=1E-3 ) ) class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = UNetaDModel _A = "sample" @property def _a ( self : str , snake_case__ : Any=(32, 32) ): """simple docstring""" A =4 A =3 A =floats_tensor((batch_size, num_channels) + sizes ).to(snake_case__ ) A =torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case__ ) return {"sample": noise, "timestep": time_step} @property def _a ( self : Optional[int] ): """simple docstring""" return (3, 32, 32) @property def _a ( self : Any ): """simple docstring""" return (3, 32, 32) def _a ( self : Optional[Any] ): """simple docstring""" A ={ "block_out_channels": [32, 64, 64, 64], "in_channels": 3, "layers_per_block": 1, "out_channels": 3, "time_embedding_type": "fourier", "norm_eps": 1E-6, "mid_block_scale_factor": math.sqrt(2.0 ), "norm_num_groups": None, "down_block_types": [ "SkipDownBlock2D", "AttnSkipDownBlock2D", "SkipDownBlock2D", "SkipDownBlock2D", ], "up_block_types": [ "SkipUpBlock2D", "SkipUpBlock2D", "AttnSkipUpBlock2D", "SkipUpBlock2D", ], } A =self.dummy_input return init_dict, inputs_dict @slow def _a ( self : Tuple ): """simple docstring""" A , A =UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(snake_case__ ) A =self.dummy_input A =floats_tensor((4, 3) + (2_56, 2_56) ).to(snake_case__ ) A =noise A =model(**snake_case__ ) assert image is not None, "Make sure output is not None" @slow def _a ( self : Dict ): """simple docstring""" A =UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" ) model.to(snake_case__ ) A =4 A =3 A =(2_56, 2_56) A =torch.ones((batch_size, num_channels) + sizes ).to(snake_case__ ) A =torch.tensor(batch_size * [1E-4] ).to(snake_case__ ) with torch.no_grad(): A =model(snake_case__ , snake_case__ ).sample A =output[0, -3:, -3:, -1].flatten().cpu() # fmt: off A =torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(snake_case__ , snake_case__ , rtol=1E-2 ) ) def _a ( self : List[Any] ): """simple docstring""" A =UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" ) model.to(snake_case__ ) A =4 A =3 A =(32, 32) A =torch.ones((batch_size, num_channels) + sizes ).to(snake_case__ ) A =torch.tensor(batch_size * [1E-4] 
).to(snake_case__ ) with torch.no_grad(): A =model(snake_case__ , snake_case__ ).sample A =output[0, -3:, -3:, -1].flatten().cpu() # fmt: off A =torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(snake_case__ , snake_case__ , rtol=1E-2 ) ) def _a ( self : List[Any] ): """simple docstring""" pass
689
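# Editor's sketch (assumes diffusers is installed, and that the aliased class
# in this dump is diffusers.UNet2DModel): a tiny UNet maps a noisy sample to an
# output with identical spatial shape, which is what the testers above rely on.
import torch
from diffusers import UNet2DModel

unet = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
sample = torch.randn(1, 3, 32, 32)
assert unet(sample, timestep=10).sample.shape == sample.shape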
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so that every node holds one coin."""
    if root is None:
        return 0

    # Validation: the number of coins must equal the number of nodes.
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation: post-order accumulation of moves and per-subtree excess.
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
689
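# Editor's worked example for distribute_coins above: in the tree 0 / (0, 3),
# two moves bring coins up from the right leaf and one move feeds the left leaf.
assert distribute_coins(TreeNode(0, TreeNode(0), TreeNode(3))) == 3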
1
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Lazily create the bucket, then prepend so recent entries come first.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
689
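# Editor's stand-alone sketch of the same separate-chaining idea, minus the
# HashTable base class: each occupied bucket holds a deque, and inserts prepend
# so the most recently added entry is found first.
from collections import deque

buckets = [None] * 8

def chain_insert(key, value):
    slot = hash(key) % len(buckets)
    if buckets[slot] is None:
        buckets[slot] = deque()
    buckets[slot].appendleft((key, value))

chain_insert("alpha", 1)
chain_insert("beta", 2)  # collisions simply extend the slot's deque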
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {"""vocab_file""": """vocab.txt"""} __a = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } __a = { """openbmb/cpm-ant-10b""": 1_0_2_4, } def UpperCamelCase_ ( a_ ) ->List[Any]: A =collections.OrderedDict() with open(a_ , "r" , encoding="utf-8" ) as reader: A =reader.readlines() for index, token in enumerate(a_ ): A =token.rstrip("\n" ) A =index return vocab class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ): """simple docstring""" A =vocab A =unk_token A =max_input_chars_per_word def _a ( self : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" A =list(snake_case__ ) if len(snake_case__ ) > self.max_input_chars_per_word: return [self.unk_token] A =0 A =[] while start < len(snake_case__ ): A =len(snake_case__ ) A =None while start < end: A ="".join(chars[start:end] ) if substr in self.vocab: A =substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(snake_case__ ) A =end return sub_tokens class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = ["input_ids", "attention_mask"] _A = False def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ): """simple docstring""" requires_backends(self , ["jieba"] ) super().__init__( bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , ) A =bod_token A =eod_token A =load_vocab(snake_case__ ) A =self.encoder[space_token] A =self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) A ={v: k for k, v in self.encoder.items()} A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _a ( self : Dict ): """simple docstring""" return self.encoder[self.bod_token] @property def _a ( self : List[str] ): """simple docstring""" return self.encoder[self.eod_token] @property def _a ( self : Any ): """simple docstring""" return self.encoder["\n"] @property def _a ( self : List[str] ): """simple docstring""" return len(self.encoder ) def _a ( self : Tuple ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : Tuple , snake_case__ : int ): """simple docstring""" A =[] for x in jieba.cut(snake_case__ , cut_all=snake_case__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) ) return output_tokens def _a ( self : List[Any] , snake_case__ : List[Any] , 
**snake_case__ : str ): """simple docstring""" A =[i for i in token_ids if i >= 0] A =[ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(snake_case__ , **snake_case__ ) def _a ( self : List[Any] , snake_case__ : int ): """simple docstring""" return token in self.encoder def _a ( self : Optional[Any] , snake_case__ : List[str] ): """simple docstring""" return "".join(snake_case__ ) def _a ( self : List[Any] , snake_case__ : Optional[Any] ): """simple docstring""" return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def _a ( self : Dict , snake_case__ : Optional[int] ): """simple docstring""" return self.decoder.get(snake_case__ , self.unk_token ) def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" if os.path.isdir(snake_case__ ): A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: A =(filename_prefix + "-" if filename_prefix else "") + save_directory A =0 if " " in self.encoder: A =self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: A =self.encoder["\n"] del self.encoder["\n"] A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) with open(snake_case__ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) A =token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ): """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is not None: return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) return [1] + ([0] * len(snake_case__ ))
689
1
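# Editor's sketch of the id-ordered vocabulary the tokenizer above rebuilds
# twice: sorting items by index keeps save_vocabulary's write order aligned
# with the token ids.
import collections

encoder = {"<unk>": 1, "<s>": 0, "hello": 2}
ordered = collections.OrderedDict(sorted(encoder.items(), key=lambda x: x[1]))
assert list(ordered) == ["<s>", "<unk>", "hello"]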
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """Decorator: let an accelerate offload hook (if attached) run before `method`.

    For accelerate versions older than 0.17.0 the method is returned unchanged.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
689
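# Editor's note (sketch): the guard above compares only the release part of the
# accelerate version, so dev builds of 0.17 and later still get the hook-aware
# wrapper.
from packaging import version

base = version.parse("0.17.0.dev0").base_version  # -> "0.17.0"
assert version.parse(base) >= version.parse("0.17.0")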
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance i to the next divisor, record it, then strip it out entirely.
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
689
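# Editor's quick check for solution above, using Project Euler's own example:
# 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
assert solution(13195) == 29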
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
689
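# Editor's generic sketch of the lazy-import trick _LazyModule relies on
# (PEP 562): a module-level __getattr__ defers the heavy import until a name is
# first touched. The target module path below is illustrative.
import importlib

def __getattr__(name):
    if name == "GPTBigCodeModel":
        module = importlib.import_module("transformers.models.gpt_bigcode.modeling_gpt_bigcode")
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")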
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = "Wav2Vec2FeatureExtractor" _A = "AutoTokenizer" def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" super().__init__(snake_case__ , snake_case__ ) A =self.feature_extractor A =False @classmethod def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ): """simple docstring""" try: return super().from_pretrained(snake_case__ , **snake_case__ ) except OSError: warnings.warn( f'''Loading a tokenizer inside {cls.__name__} from a config that does not''' " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , snake_case__ , ) A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ ) A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ ) return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ ) def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*snake_case__ , **snake_case__ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) A =kwargs.pop("raw_speech" ) else: A =kwargs.pop("audio" , snake_case__ ) A =kwargs.pop("sampling_rate" , snake_case__ ) A =kwargs.pop("text" , snake_case__ ) if len(snake_case__ ) > 0: A =args[0] A =args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ ) if text is not None: A =self.tokenizer(snake_case__ , **snake_case__ ) if text is None: return inputs elif audio is None: return encodings else: A =encodings["input_ids"] return inputs def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*snake_case__ , **snake_case__ ) A =kwargs.pop("input_features" , snake_case__ ) A =kwargs.pop("labels" , snake_case__ ) if len(snake_case__ ) > 0: A =args[0] A =args[1:] if input_features is not None: A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ ) if labels is not None: A =self.tokenizer.pad(snake_case__ , **snake_case__ ) if labels is None: return input_features elif input_features is None: return labels else: A =labels["input_ids"] return input_features def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ): """simple docstring""" return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ): """simple docstring""" return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @contextmanager def _a ( self : int ): """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) A =True A =self.tokenizer yield A =self.feature_extractor A =False
689
1
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
689
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
689
1
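# A sketch of how the Spark reader above is reached in practice. This assumes
# pyspark plus a `datasets` release that ships `Dataset.from_spark` (the
# public wrapper around a reader like this one); names are illustrative.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "text"])

ds = Dataset.from_spark(df)  # runs the Spark builder's download_and_prepare
print(ds[0])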
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""XGLMTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""XGLMTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XGLMForCausalLM""", """XGLMModel""", """XGLMPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """FlaxXGLMForCausalLM""", """FlaxXGLMModel""", """FlaxXGLMPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXGLMForCausalLM""", """TFXGLMModel""", """TFXGLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
689
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results

import re
import subprocess
import sys

fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
689
1
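# The XGLM __init__ above defers heavy imports behind a _LazyModule. The same
# deferred-import idea can be sketched with a PEP 562 module-level
# __getattr__; this is a simplification and assumes it sits in a package
# __init__.py so the relative import resolves.
import importlib

_IMPORT_STRUCTURE = {"tokenization_xglm": ["XGLMTokenizer"]}

def __getattr__(name):
    for submodule, symbols in _IMPORT_STRUCTURE.items():
        if name in symbols:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")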
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {"""vocab_file""": """spm_char.model"""} __a = { """vocab_file""": { """microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""", """microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""", """microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""", } } __a = { """microsoft/speecht5_asr""": 1_0_2_4, """microsoft/speecht5_tts""": 1_0_2_4, """microsoft/speecht5_vc""": 1_0_2_4, } class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[Any]="<unk>" , snake_case__ : Any="<pad>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Tuple , ): """simple docstring""" A ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) A =vocab_file A =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case__ ) @property def _a ( self : Optional[Any] ): """simple docstring""" return self.sp_model.get_piece_size() def _a ( self : Any ): """simple docstring""" A ={self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): """simple docstring""" A =self.__dict__.copy() A =None return state def __setstate__( self : Optional[int] , snake_case__ : str ): """simple docstring""" A =d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A ={} A =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self : Dict , snake_case__ : str ): """simple docstring""" return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def _a ( self : Union[str, Any] , snake_case__ : int ): """simple docstring""" return self.sp_model.piece_to_id(snake_case__ ) def _a ( self : Union[str, Any] , snake_case__ : Tuple ): """simple docstring""" A =self.sp_model.IdToPiece(snake_case__ ) return token def _a ( self : Dict , snake_case__ : Tuple ): """simple docstring""" A =[] A ="" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(snake_case__ ) + token A =[] else: current_sub_tokens.append(snake_case__ ) out_string += self.sp_model.decode(snake_case__ ) return out_string.strip() def _a ( self : Any , snake_case__ : Any , snake_case__ : Any=None ): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): """simple docstring""" if 
already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) A =[1] if token_ids_a is None: return ([0] * len(snake_case__ )) + suffix_ones return ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones def _a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(snake_case__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case__ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case__ , "wb" ) as fi: A =self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (out_vocab_file,)
689
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = { """configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""MobileViTFeatureExtractor"""] __a = ["""MobileViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MobileViTForImageClassification""", """MobileViTForSemanticSegmentation""", """MobileViTModel""", """MobileViTPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFMobileViTForImageClassification""", """TFMobileViTForSemanticSegmentation""", """TFMobileViTModel""", """TFMobileViTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
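# Usage sketch for the character-level SentencePiece tokenizer above, via the
# public transformers API; the checkpoint name is an assumption and
# sentencepiece must be installed.
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
ids = tokenizer("Hello world").input_ids  # per-character pieces + </s>
print(tokenizer.convert_ids_to_tokens(ids))
print(tokenizer.decode(ids))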
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class UpperCamelCase__: """simple docstring""" def __init__( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : int=13 , snake_case__ : List[str]=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : Union[str, Any]=False , snake_case__ : Union[str, Any]=True , snake_case__ : Any=99 , snake_case__ : Tuple=32 , snake_case__ : Optional[int]=5 , snake_case__ : List[Any]=4 , snake_case__ : Union[str, Any]=37 , snake_case__ : List[str]="gelu" , snake_case__ : Optional[Any]=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Optional[Any]=5_12 , snake_case__ : Union[str, Any]=16 , snake_case__ : Any=2 , snake_case__ : int=0.02 , snake_case__ : Tuple=3 , snake_case__ : int=4 , snake_case__ : Tuple=None , ): """simple docstring""" A =parent A =batch_size A =seq_length A =is_training A =use_input_mask A =use_token_type_ids A =use_labels A =vocab_size A =hidden_size A =num_hidden_layers A =num_attention_heads A =intermediate_size A =hidden_act A =hidden_dropout_prob A =attention_probs_dropout_prob A =max_position_embeddings A =type_vocab_size A =type_sequence_label_size A =initializer_range A =num_labels A =num_choices A =scope def _a ( self : Tuple ): """simple docstring""" A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A =None if self.use_input_mask: A =random_attention_mask([self.batch_size, self.seq_length] ) A =None if self.use_token_type_ids: A =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A =None A =None A =None if self.use_labels: A =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A =ids_tensor([self.batch_size] , self.num_choices ) A =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : Optional[int] ): """simple docstring""" return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , ) def _a ( self : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any] ): """simple docstring""" A =LlamaModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , attention_mask=snake_case__ ) A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : int , 
snake_case__ : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : int , ): """simple docstring""" A =True A =LlamaModel(snake_case__ ) model.to(snake_case__ ) model.eval() A =model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , ) A =model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , ) A =model(snake_case__ , attention_mask=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , ): """simple docstring""" A =LlamaForCausalLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Dict , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : int , ): """simple docstring""" A =True A =True A =LlamaForCausalLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() # first forward pass A =model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , ) A =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A =ids_tensor((self.batch_size, 3) , config.vocab_size ) A =ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A =torch.cat([input_ids, next_tokens] , dim=-1 ) A =torch.cat([input_mask, next_mask] , dim=-1 ) A =model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0] A =model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0] # select random slice A =ids_tensor((1,) , output_from_past.shape[-1] ).item() A =output_from_no_past[:, -3:, random_slice_idx].detach() A =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) ) def _a ( self : Optional[Any] ): """simple docstring""" A =self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) =config_and_inputs A ={"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _A = (LlamaForCausalLM,) if is_torch_available() else () _A = ( { "feature-extraction": LlamaModel, "text-classification": 
LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _A = False _A = False def _a ( self : Optional[Any] ): """simple docstring""" A =LlamaModelTester(self ) A =ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def _a ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : List[str] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _a ( self : List[str] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A =type self.model_tester.create_and_check_model(*snake_case__ ) def _a ( self : Union[str, Any] ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() A =3 A =input_dict["input_ids"] A =input_ids.ne(1 ).to(snake_case__ ) A =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A =LlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Union[str, Any] ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() A =3 A ="single_label_classification" A =input_dict["input_ids"] A =input_ids.ne(1 ).to(snake_case__ ) A =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A =LlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : List[str] ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() A =3 A ="multi_label_classification" A =input_dict["input_ids"] A =input_ids.ne(1 ).to(snake_case__ ) A =ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) A =LlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("LLaMA buffers include complex numbers, which breaks this test" ) def _a ( self : Tuple ): """simple docstring""" pass @parameterized.expand([("linear",), ("dynamic",)] ) def _a ( self : Dict , snake_case__ : List[str] ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() A =ids_tensor([1, 10] , config.vocab_size ) A =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A =LlamaModel(snake_case__ ) original_model.to(snake_case__ ) original_model.eval() A =original_model(snake_case__ ).last_hidden_state A =original_model(snake_case__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A ={"type": scaling_type, "factor": 10.0} A =LlamaModel(snake_case__ ) scaled_model.to(snake_case__ ) scaled_model.eval() A =scaled_model(snake_case__ ).last_hidden_state A =scaled_model(snake_case__ 
).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) ) @require_torch class UpperCamelCase__( unittest.TestCase ): """simple docstring""" @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" ) @slow def _a ( self : List[Any] ): """simple docstring""" A =[1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] A =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" ) A =model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A =torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off A =torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , snake_case__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" ) @slow def _a ( self : int ): """simple docstring""" A =[1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] A =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" ) A =model(torch.tensor(snake_case__ ) ) # Expected mean on dim = -1 A =torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off A =torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , snake_case__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" 
) @slow def _a ( self : Any ): """simple docstring""" A =[1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] A =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" ) A =model(torch.tensor(snake_case__ ) ) # Expected mean on dim = -1 A =torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off A =torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( "Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" ) @slow def _a ( self : Optional[Any] ): """simple docstring""" A =[1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] A =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" ) A =model(torch.tensor(snake_case__ ) ) A =torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1E-2 , rtol=1E-2 ) # fmt: off A =torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , snake_case__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip("Model is curently gated" ) @slow def _a ( self : Any ): """simple docstring""" A ="Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi" A ="Simply put, the theory of relativity states that " A =LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" ) A =tokenizer.encode(snake_case__ , return_tensors="pt" ) A =LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=snake_case__ ) # greedy generation outputs A =model.generate(snake_case__ , max_new_tokens=64 , top_p=snake_case__ , temperature=1 , do_sample=snake_case__ ) A =tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ )
689
def nand_gate(input_a: int, input_b: int) -> int:
    # Logical NAND: 0 only when both inputs are 1.
    return int((input_a, input_b).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
689
1
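# NAND is functionally complete: NOT, AND and OR can all be composed from the
# nand_gate defined above (restated here so this sketch runs standalone).
def nand_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(0) != 0)

def not_gate(a: int) -> int:
    return nand_gate(a, a)

def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))

def or_gate(a: int, b: int) -> int:
    return nand_gate(not_gate(a), not_gate(b))

assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]
assert [or_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 1, 1, 1]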
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results

import re
import subprocess
import sys

fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
689
def combination_sum_iv(n: int, array: list, target: int) -> int:
    # Plain recursion: count ordered ways to build `target` from items of `array`.
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    # Top-down recursion memoised in `dp_array`.
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    # Bottom-up tabulation over all sub-targets up to `target`.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
689
1
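# Worked check for the combination_sum_iv implementations above, using the
# file's own parameters: ordered ways to write 5 as sums of 1, 2 and 5.
# The recurrence f(t) = sum(f(t - item)) with f(0) = 1 gives
# f(1)=1, f(2)=2, f(3)=3, f(4)=5, f(5)=f(4)+f(3)+f(0)=9.
f = [0] * 6
f[0] = 1
for t in range(1, 6):
    f[t] = sum(f[t - item] for item in (1, 2, 5) if t >= item)
assert f[5] == 9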
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = 42 class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : List[Any] , snake_case__ : Tuple=3 , snake_case__ : List[Any]=3 , snake_case__ : Any=("DownEncoderBlock2D",) , snake_case__ : Tuple=(64,) , snake_case__ : Dict=2 , snake_case__ : List[str]=32 , snake_case__ : List[Any]="silu" , snake_case__ : Optional[Any]=True , ): """simple docstring""" super().__init__() A =layers_per_block A =torch.nn.Convad( snake_case__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) A =None A =nn.ModuleList([] ) # down A =block_out_channels[0] for i, down_block_type in enumerate(snake_case__ ): A =output_channel A =block_out_channels[i] A =i == len(snake_case__ ) - 1 A =get_down_block( snake_case__ , num_layers=self.layers_per_block , in_channels=snake_case__ , out_channels=snake_case__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , ) self.down_blocks.append(snake_case__ ) # mid A =UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , ) # out A =nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=snake_case__ , eps=1E-6 ) A =nn.SiLU() A =2 * out_channels if double_z else out_channels A =nn.Convad(block_out_channels[-1] , snake_case__ , 3 , padding=1 ) A =False def _a ( self : Any , snake_case__ : List[Any] ): """simple docstring""" A =x A =self.conv_in(snake_case__ ) if self.training and self.gradient_checkpointing: def create_custom_forward(snake_case__ : List[Any] ): def custom_forward(*snake_case__ : Tuple ): return module(*snake_case__ ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: A =torch.utils.checkpoint.checkpoint( create_custom_forward(snake_case__ ) , snake_case__ , use_reentrant=snake_case__ ) # middle A =torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , use_reentrant=snake_case__ ) else: for down_block in self.down_blocks: A =torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ ) # middle A =torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , snake_case__ ) else: # down for down_block in self.down_blocks: A =down_block(snake_case__ ) # middle A =self.mid_block(snake_case__ ) # post-process A =self.conv_norm_out(snake_case__ ) A =self.conv_act(snake_case__ ) A =self.conv_out(snake_case__ ) return sample class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : List[str] , snake_case__ : Dict=3 , snake_case__ : Optional[Any]=3 , snake_case__ : Optional[int]=("UpDecoderBlock2D",) , snake_case__ : Tuple=(64,) , snake_case__ : List[str]=2 , snake_case__ : Tuple=32 , snake_case__ : Optional[int]="silu" , snake_case__ : List[str]="group" , ): """simple docstring""" super().__init__() A =layers_per_block A =nn.Convad( snake_case__ , 
block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) A =None A =nn.ModuleList([] ) A =in_channels if norm_type == "spatial" else None # mid A =UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , ) # up A =list(reversed(snake_case__ ) ) A =reversed_block_out_channels[0] for i, up_block_type in enumerate(snake_case__ ): A =output_channel A =reversed_block_out_channels[i] A =i == len(snake_case__ ) - 1 A =get_up_block( snake_case__ , num_layers=self.layers_per_block + 1 , in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , resnet_time_scale_shift=snake_case__ , ) self.up_blocks.append(snake_case__ ) A =output_channel # out if norm_type == "spatial": A =SpatialNorm(block_out_channels[0] , snake_case__ ) else: A =nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=snake_case__ , eps=1E-6 ) A =nn.SiLU() A =nn.Convad(block_out_channels[0] , snake_case__ , 3 , padding=1 ) A =False def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Dict=None ): """simple docstring""" A =z A =self.conv_in(snake_case__ ) A =next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(snake_case__ : str ): def custom_forward(*snake_case__ : Optional[Any] ): return module(*snake_case__ ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle A =torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ ) A =sample.to(snake_case__ ) # up for up_block in self.up_blocks: A =torch.utils.checkpoint.checkpoint( create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ ) else: # middle A =torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ ) A =sample.to(snake_case__ ) # up for up_block in self.up_blocks: A =torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ ) else: # middle A =self.mid_block(snake_case__ , snake_case__ ) A =sample.to(snake_case__ ) # up for up_block in self.up_blocks: A =up_block(snake_case__ , snake_case__ ) # post-process if latent_embeds is None: A =self.conv_norm_out(snake_case__ ) else: A =self.conv_norm_out(snake_case__ , snake_case__ ) A =self.conv_act(snake_case__ ) A =self.conv_out(snake_case__ ) return sample class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Optional[int]="random" , snake_case__ : Tuple=False , snake_case__ : Optional[int]=True ): """simple docstring""" super().__init__() A =n_e A =vq_embed_dim A =beta A =legacy A =nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) A =remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) A =self.used.shape[0] A =unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": A 
=self.re_embed A =self.re_embed + 1 print( f'''Remapping {self.n_e} indices to {self.re_embed} indices. ''' f'''Using {self.unknown_index} for unknown indices.''' ) else: A =n_e A =sane_index_shape def _a ( self : Optional[Any] , snake_case__ : Dict ): """simple docstring""" A =inds.shape assert len(snake_case__ ) > 1 A =inds.reshape(ishape[0] , -1 ) A =self.used.to(snake_case__ ) A =(inds[:, :, None] == used[None, None, ...]).long() A =match.argmax(-1 ) A =match.sum(2 ) < 1 if self.unknown_index == "random": A =torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: A =self.unknown_index return new.reshape(snake_case__ ) def _a ( self : Optional[int] , snake_case__ : Union[str, Any] ): """simple docstring""" A =inds.shape assert len(snake_case__ ) > 1 A =inds.reshape(ishape[0] , -1 ) A =self.used.to(snake_case__ ) if self.re_embed > self.used.shape[0]: # extra token A =0 # simply set to zero A =torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , snake_case__ ) return back.reshape(snake_case__ ) def _a ( self : str , snake_case__ : List[Any] ): """simple docstring""" A =z.permute(0 , 2 , 3 , 1 ).contiguous() A =z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z A =torch.argmin(torch.cdist(snake_case__ , self.embedding.weight ) , dim=1 ) A =self.embedding(snake_case__ ).view(z.shape ) A =None A =None # compute loss for embedding if not self.legacy: A =self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: A =torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients A =z + (z_q - z).detach() # reshape back to match original input shape A =z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: A =min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis A =self.remap_to_used(snake_case__ ) A =min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: A =min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def _a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Optional[int] ): """simple docstring""" if self.remap is not None: A =indices.reshape(shape[0] , -1 ) # add batch axis A =self.unmap_to_all(snake_case__ ) A =indices.reshape(-1 ) # flatten again # get quantized latent vectors A =self.embedding(snake_case__ ) if shape is not None: A =z_q.view(snake_case__ ) # reshape back to match original input shape A =z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Any , snake_case__ : Tuple , snake_case__ : Any=False ): """simple docstring""" A =parameters A , A =torch.chunk(snake_case__ , 2 , dim=1 ) A =torch.clamp(self.logvar , -30.0 , 20.0 ) A =deterministic A =torch.exp(0.5 * self.logvar ) A =torch.exp(self.logvar ) if self.deterministic: A =A =torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def _a ( self : Optional[Any] , snake_case__ : Optional[torch.Generator] = None ): """simple docstring""" A =randn_tensor( self.mean.shape , generator=snake_case__ , device=self.parameters.device , dtype=self.parameters.dtype ) A =self.mean + self.std * sample return x def _a ( self : Optional[int] , snake_case__ : str=None ): """simple docstring""" if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) 
+ self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def _a ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=[1, 2, 3] ): """simple docstring""" if self.deterministic: return torch.Tensor([0.0] ) A =np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=snake_case__ ) def _a ( self : Dict ): """simple docstring""" return self.mean
689
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    # Return u * (u - 1) * ... * (u - p + 1).
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
689
1
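# The interpolation `main` above is interactive; here is the same
# forward-difference logic as a plain function, checked on f(x) = x**2, where
# quadratic data must be reproduced exactly (2.25 at x = 1.5).
import math

def ucal_product(u: float, p: int) -> float:
    # u * (u - 1) * ... * (u - p + 1)
    temp = u
    for i in range(1, p):
        temp *= u - i
    return temp

def newton_forward(x: list, y: list, value: float) -> float:
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i in range(n):
        table[i][0] = y[i]
    for i in range(1, n):                 # forward-difference table
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])    # x must be equally spaced
    total = table[0][0]
    for i in range(1, n):
        total += (ucal_product(u, i) * table[0][i]) / math.factorial(i)
    return total

assert newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5) == 2.25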
from abc import ABC, abstractmethod from typing import List, Optional class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def _a ( self : int ): """simple docstring""" A =0 A =False while not completed: if counter == 1: self.reset() A =self.advance() if not self.does_advance(snake_case__ ): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) A , A , A =self.update(snake_case__ ) counter += 1 if counter > 1_00_00: raise Exception("update() does not fulfill the constraint." ) if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly." ) @abstractmethod def _a ( self : Tuple ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _a ( self : List[str] , snake_case__ : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _a ( self : Dict , snake_case__ : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _a ( self : Any ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _a ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _a ( self : List[str] , snake_case__ : Any=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Any , snake_case__ : List[int] ): """simple docstring""" super(snake_case__ , self ).__init__() if not isinstance(snake_case__ , snake_case__ ) or len(snake_case__ ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(snake_case__ , snake_case__ ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) A =token_ids A =len(self.token_ids ) A =-1 # the index of the currently fulfilled step A =False def _a ( self : List[Any] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def _a ( self : int , snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(snake_case__ )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def _a ( self : Optional[int] , snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(snake_case__ )}''' ) A =False A =False A =False if self.does_advance(snake_case__ ): self.fulfilled_idx += 1 A =True if self.fulfilled_idx == (self.seqlen - 1): A =True A =completed else: # failed to make progress. 
A =True self.reset() return stepped, completed, reset def _a ( self : List[str] ): """simple docstring""" A =False A =0 def _a ( self : Optional[Any] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def _a ( self : List[str] , snake_case__ : Optional[int]=False ): """simple docstring""" A =PhrasalConstraint(self.token_ids ) if stateful: A =self.seqlen A =self.fulfilled_idx A =self.completed return new_constraint class UpperCamelCase__: """simple docstring""" def __init__( self : Dict , snake_case__ : List[List[int]] , snake_case__ : List[str]=True ): """simple docstring""" A =max([len(snake_case__ ) for one in nested_token_ids] ) A ={} for token_ids in nested_token_ids: A =root for tidx, token_id in enumerate(snake_case__ ): if token_id not in level: A ={} A =level[token_id] if no_subsets and self.has_subsets(snake_case__ , snake_case__ ): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" f''' {nested_token_ids}.''' ) A =root def _a ( self : Dict , snake_case__ : Tuple ): """simple docstring""" A =self.trie for current_token in current_seq: A =start[current_token] A =list(start.keys() ) return next_tokens def _a ( self : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" A =self.next_tokens(snake_case__ ) return len(snake_case__ ) == 0 def _a ( self : Tuple , snake_case__ : Dict ): """simple docstring""" A =list(root.values() ) if len(snake_case__ ) == 0: return 1 else: return sum([self.count_leaves(snake_case__ ) for nn in next_nodes] ) def _a ( self : Union[str, Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] ): """simple docstring""" A =self.count_leaves(snake_case__ ) return len(snake_case__ ) != leaf_count class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , snake_case__ : List[List[int]] ): """simple docstring""" super(snake_case__ , self ).__init__() if not isinstance(snake_case__ , snake_case__ ) or len(snake_case__ ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(snake_case__ , snake_case__ ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(snake_case__ , snake_case__ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) A =DisjunctiveTrie(snake_case__ ) A =nested_token_ids A =self.trie.max_height A =[] A =False def _a ( self : Union[str, Any] ): """simple docstring""" A =self.trie.next_tokens(self.current_seq ) if len(snake_case__ ) == 0: return None else: return token_list def _a ( self : Union[str, Any] , snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(snake_case__ )}''' ) A =self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def _a ( self : Optional[int] , snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(snake_case__ )}''' ) A =False A =False A =False if self.does_advance(snake_case__ ): self.current_seq.append(snake_case__ ) A =True else: A =True self.reset() A 
=self.trie.reached_leaf(self.current_seq ) A =completed return stepped, completed, reset def _a ( self : Optional[int] ): """simple docstring""" A =False A =[] def _a ( self : Optional[int] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def _a ( self : List[str] , snake_case__ : str=False ): """simple docstring""" A =DisjunctiveConstraint(self.token_ids ) if stateful: A =self.seqlen A =self.current_seq A =self.completed return new_constraint class UpperCamelCase__: """simple docstring""" def __init__( self : int , snake_case__ : List[Constraint] ): """simple docstring""" A =constraints # max # of steps required to fulfill a given constraint A =max([c.seqlen for c in constraints] ) A =len(snake_case__ ) A =False self.init_state() def _a ( self : Dict ): """simple docstring""" A =[] A =None A =[constraint.copy(stateful=snake_case__ ) for constraint in self.constraints] def _a ( self : List[Any] ): """simple docstring""" A =0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def _a ( self : Optional[Any] ): """simple docstring""" A =[] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" A =constraint.advance() if isinstance(snake_case__ , snake_case__ ): token_list.append(snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): token_list.extend(snake_case__ ) else: A =self.inprogress_constraint.advance() if isinstance(snake_case__ , snake_case__ ): token_list.append(snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): token_list.extend(snake_case__ ) if len(snake_case__ ) == 0: return None else: return token_list def _a ( self : str , snake_case__ : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint A , A =self.add(snake_case__ ) # the entire list of constraints are fulfilled if self.completed: break def _a ( self : List[Any] , snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) A , A =False, False if self.completed: A =True A =False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state A , A , A =self.inprogress_constraint.update(snake_case__ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=snake_case__ ) ) A =None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) A =None if len(self.pending_constraints ) == 0: # we're done! A =True else: # Not in the middle of fulfilling a constraint. 
So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(snake_case__ ): A , A , A =pending_constraint.update(snake_case__ ) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(snake_case__ ) A =None if not complete and stepped: A =pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". A =( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. A =True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def _a ( self : Optional[Any] , snake_case__ : Union[str, Any]=True ): """simple docstring""" A =ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: A =[ constraint.copy(stateful=snake_case__ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: A =self.inprogress_constraint.copy(stateful=snake_case__ ) A =[constraint.copy() for constraint in self.pending_constraints] return new_state
689
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
689
1
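# The per-pixel loop above pays Python interpreter overhead for every pixel;
# for 8-bit images NumPy computes the same negative in one vectorised op.
import numpy as np

img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
negative = 255 - img
assert np.array_equal(255 - negative, img)  # applying twice restores the image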
import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py __a = """src/diffusers""" # Matches is_xxx_available() __a = re.compile(r"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla __a = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") __a = """ {0} = None """ __a = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ __a = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def UpperCamelCase_ ( a_ ) ->Any: A =_re_backend.findall(a_ ) if len(a_ ) == 0: return None return "_and_".join(a_ ) def UpperCamelCase_ ( ) ->Optional[Any]: with open(os.path.join(a_ , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f: A =f.readlines() # Get to the point we do the actual imports for type checking A =0 A ={} # Go through the end of the file while line_index < len(a_ ): # If the line contains is_backend_available, we grab all objects associated with the `else` block A =find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("else:" ): line_index += 1 line_index += 1 A =[] # Until we unindent, add backend objects to the list while line_index < len(a_ ) and len(lines[line_index] ) > 1: A =lines[line_index] A =_re_single_line_import.search(a_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(a_ ) > 0: A =objects else: line_index += 1 return backend_specific_objects def UpperCamelCase_ ( a_ , a_ ) ->Any: if name.isupper(): return DUMMY_CONSTANT.format(a_ ) elif name.islower(): return DUMMY_FUNCTION.format(a_ , a_ ) else: return DUMMY_CLASS.format(a_ , a_ ) def UpperCamelCase_ ( a_=None ) ->int: if backend_specific_objects is None: A =read_init() # For special correspondence backend to module name as used in the function requires_modulename A ={} for backend, objects in backend_specific_objects.items(): A ="[" + ", ".join(f'''"{b}"''' for b in backend.split("_and_" ) ) + "]" A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\n" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(a_ , a_ ) for o in objects] ) A =dummy_file return dummy_files def UpperCamelCase_ ( a_=False ) ->List[Any]: A =create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py A ={"torch": "pt"} # Locate actual dummy modules and read their content. A =os.path.join(a_ , "utils" ) A ={ backend: os.path.join(a_ , f'''dummy_{short_names.get(a_ , a_ )}_objects.py''' ) for backend in dummy_files.keys() } A ={} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(a_ ): with open(a_ , "r" , encoding="utf-8" , newline="\n" ) as f: A =f.read() else: A ="" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'''Updating diffusers.utils.dummy_{short_names.get(a_ , a_ )}_objects.py as the main ''' "__init__ has new objects." 
) with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( "The main __init__ has objects that are not present in " f'''diffusers.utils.dummy_{short_names.get(a_ , a_ )}_objects.py. Run `make fix-copies` ''' "to fix this." ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") __a = parser.parse_args() check_dummies(args.fix_and_overwrite)
689
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", } __a = { """vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""}, """merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""}, } __a = { """ctrl""": 2_5_6, } __a = { """Pregnancy""": 1_6_8_6_2_9, """Christianity""": 7_6_7_5, """Explain""": 1_0_6_4_2_3, """Fitness""": 6_3_4_4_0, """Saving""": 6_3_1_6_3, """Ask""": 2_7_1_7_1, """Ass""": 9_5_9_8_5, """Joke""": 1_6_3_5_0_9, """Questions""": 4_5_6_2_2, """Thoughts""": 4_9_6_0_5, """Retail""": 5_2_3_4_2, """Feminism""": 1_6_4_3_3_8, """Writing""": 1_1_9_9_2, """Atheism""": 1_9_2_2_6_3, """Netflix""": 4_8_6_1_6, """Computing""": 3_9_6_3_9, """Opinion""": 4_3_2_1_3, """Alone""": 4_4_9_6_7, """Funny""": 5_8_9_1_7, """Gaming""": 4_0_3_5_8, """Human""": 4_0_8_8, """India""": 1_3_3_1, """Joker""": 7_7_1_3_8, """Diet""": 3_6_2_0_6, """Legal""": 1_1_8_5_9, """Norman""": 4_9_3_9, """Tip""": 7_2_6_8_9, """Weight""": 5_2_3_4_3, """Movies""": 4_6_2_7_3, """Running""": 2_3_4_2_5, """Science""": 2_0_9_0, """Horror""": 3_7_7_9_3, """Confession""": 6_0_5_7_2, """Finance""": 1_2_2_5_0, """Politics""": 1_6_3_6_0, """Scary""": 1_9_1_9_8_5, """Support""": 1_2_6_5_4, """Technologies""": 3_2_5_1_6, """Teenage""": 6_6_1_6_0, """Event""": 3_2_7_6_9, """Learned""": 6_7_4_6_0, """Notion""": 1_8_2_7_7_0, """Wikipedia""": 3_7_5_8_3, """Books""": 6_6_6_5, """Extract""": 7_6_0_5_0, """Confessions""": 1_0_2_7_0_1, """Conspiracy""": 7_5_9_3_2, """Links""": 6_3_6_7_4, """Narcissus""": 1_5_0_4_2_5, """Relationship""": 5_4_7_6_6, """Relationships""": 1_3_4_7_9_6, """Reviews""": 4_1_6_7_1, """News""": 4_2_5_6, """Translation""": 2_6_8_2_0, """multilingual""": 1_2_8_4_0_6, } def UpperCamelCase_ ( a_ ) ->List[str]: A =set() A =word[0] for char in word[1:]: pairs.add((prev_char, char) ) A =char A =set(a_ ) return pairs class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = CONTROL_CODES def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ): """simple docstring""" super().__init__(unk_token=snake_case__ , **snake_case__ ) with open(snake_case__ , encoding="utf-8" ) as vocab_handle: A =json.load(snake_case__ ) A ={v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: A =merges_handle.read().split("\n" )[1:-1] A =[tuple(merge.split() ) for merge in merges] A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) A ={} @property def _a ( self : str ): """simple docstring""" return len(self.encoder ) def _a ( self : List[Any] ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : int , snake_case__ : Any ): """simple docstring""" if token in self.cache: return self.cache[token] A =tuple(snake_case__ ) A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) A =get_pairs(snake_case__ ) if not pairs: return token while True: A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break A , A =bigram A =[] A =0 while i < len(snake_case__ ): try: 
A =word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A =j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A =tuple(snake_case__ ) A =new_word if len(snake_case__ ) == 1: break else: A =get_pairs(snake_case__ ) A ="@@ ".join(snake_case__ ) A =word[:-4] A =word return word def _a ( self : List[str] , snake_case__ : int ): """simple docstring""" A =[] A =re.findall(R"\S+\n?" , snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def _a ( self : List[str] , snake_case__ : Optional[int] ): """simple docstring""" return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def _a ( self : Union[str, Any] , snake_case__ : str ): """simple docstring""" return self.decoder.get(snake_case__ , self.unk_token ) def _a ( self : Optional[int] , snake_case__ : Any ): """simple docstring""" A =" ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(snake_case__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(snake_case__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" ) A =0 with open(snake_case__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) A =token_index writer.write(" ".join(snake_case__ ) + "\n" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
689
1
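The pair-extraction step driving the BPE merge loop in the tokenizer above can be exercised on its own; a minimal sketch (the standalone function name here is illustrative):

def get_symbol_pairs(word):
    # word is a tuple of symbols, e.g. ("h", "e", "l", "l", "o</w>")
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_symbol_pairs(("h", "e", "l", "l", "o</w>")) == {
    ("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")
}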
def solution(n: int = 100) -> int:
    """Count the distinct terms among a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    max_limit = n + 1  # maximum limit (range() excludes its upper bound)
    for a in range(2, max_limit):
        for b in range(2, max_limit):
            current_power = a**b  # calculates the current power
            collect_powers.add(current_power)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
689
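Project Euler's statement of this problem includes a small worked case that doubles as a sanity check: for 2 <= a <= 5 and 2 <= b <= 5 there are exactly 15 distinct terms.

assert solution(5) == 15  # worked example from the problem statement
assert solution(2) == 1   # only 2**2 = 4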
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Take this denomination while it still fits
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answers" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
689
1
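One caveat worth keeping in mind: the greedy strategy above is optimal for canonical coin systems such as the Indian denominations, but not for arbitrary ones. A quick counterexample using the same function:

# Greedy picks 4 + 1 + 1 (three coins) even though 3 + 3 (two coins) is optimal.
assert find_minimum_change([1, 3, 4], "6") == [4, 1, 1]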
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
689
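A usage sketch against the PathLike alias above; open() itself accepts all three member types:

def read_bytes(path: PathLike) -> bytes:
    # str, bytes, and os.PathLike inputs are all valid here
    with open(path, "rb") as f:
        return f.read()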
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = ["""model.decoder.embed_positions.weights"""] def UpperCamelCase_ ( a_ ) ->List[str]: if "emb" in name: A =name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: A =name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: A =name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: A =name.replace("linear1" , "fc1" ) if "linear2" in name: A =name.replace("linear2" , "fc2" ) if "norm1" in name: A =name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: A =name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: A =name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: A =name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: A =name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]: A =list(state_dict.keys() ) A ={} for key in keys: A =state_dict.pop(a_ ) A =rename_keys(a_ ) if "in_proj_weight" in key: # split fused qkv proj A =val[:hidden_size, :] A =val[hidden_size : 2 * hidden_size, :] A =val[-hidden_size:, :] elif "enc_to_dec_proj" in key: A =val else: A =val return state_dict, enc_dec_proj_state_dict def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig: if checkpoint == "small": # default config values A =1024 A =24 A =16 elif checkpoint == "medium": A =1536 A =48 A =24 elif checkpoint == "large": A =2048 A =48 A =32 else: raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) A =MusicgenDecoderConfig( hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , ) return config @torch.no_grad() def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]: A =MusicGen.get_pretrained(a_ , device=a_ ) A =decoder_config_from_checkpoint(a_ ) A =fairseq_model.lm.state_dict() A , A =rename_state_dict( a_ , hidden_size=decoder_config.hidden_size ) A =TaEncoderModel.from_pretrained("t5-base" ) A =EncodecModel.from_pretrained("facebook/encodec_32khz" ) A =MusicgenForCausalLM(a_ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection A , A =decoder.load_state_dict(a_ , strict=a_ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(a_ ) if len(a_ ) > 0: raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' ) if len(a_ ) > 0: raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(a_ ) # check we can do a forward pass A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) A 
=input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): A =model(input_ids=a_ , decoder_input_ids=a_ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor A =AutoTokenizer.from_pretrained("t5-base" ) A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ ) # set the appropriate bos/pad token ids A =2048 A =2048 # set other default generation config params A =int(30 * audio_encoder.config.frame_rate ) A =True A =3.0 if pytorch_dump_folder is not None: Path(a_ ).mkdir(exist_ok=a_ ) logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(a_ ) processor.save_pretrained(a_ ) if repo_id: logger.info(f'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(a_ ) processor.push_to_hub(a_ ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) __a = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
689
1
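The in_proj_weight branch of the renaming code above splits a fused query/key/value projection into three equal row blocks; the slicing can be sanity-checked in isolation (the hidden size here is illustrative):

import torch

hidden_size = 8
fused_qkv = torch.randn(3 * hidden_size, hidden_size)

q = fused_qkv[:hidden_size, :]
k = fused_qkv[hidden_size : 2 * hidden_size, :]
v = fused_qkv[-hidden_size:, :]

# the three slices tile the fused matrix exactly
assert torch.equal(torch.cat([q, k, v], dim=0), fused_qkv)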
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for pass_num in range(arr_size):
        # even passes compare pairs (0,1), (2,3), ...; odd passes (1,2), (3,4), ...
        for i in range(pass_num % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
689
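A quick property check for the brick sort above, assuming the function as written:

import random

data = [random.randint(0, 100) for _ in range(50)]
assert odd_even_transposition(data[:]) == sorted(data)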
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock_times_out(tmpdir):
    locka = FileLock(str(tmpdir / "foo.lock"))
    lockb = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename_is_truncated(tmpdir):
    filename = "a" * 1000 + ".lock"
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(".lock")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
689
1
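For context, typical cross-process usage of the lock class under test, as a minimal sketch:

from datasets.utils.filelock import FileLock

lock = FileLock("my_resource.lock")  # path is illustrative
with lock:  # blocks until no other process holds the lock
    pass    # critical section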
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig __a = logging.get_logger(__name__) # General docstring __a = """RegNetConfig""" # Base docstring __a = """facebook/regnet-y-040""" __a = [1, 1_0_8_8, 7, 7] # Image classification docstring __a = """facebook/regnet-y-040""" __a = """tabby, tabby cat""" __a = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : int = 3 , snake_case__ : int = 1 , snake_case__ : int = 1 , snake_case__ : Optional[str] = "relu" , ): """simple docstring""" super().__init__() A =nn.Convad( snake_case__ , snake_case__ , kernel_size=snake_case__ , stride=snake_case__ , padding=kernel_size // 2 , groups=snake_case__ , bias=snake_case__ , ) A =nn.BatchNormad(snake_case__ ) A =ACTaFN[activation] if activation is not None else nn.Identity() def _a ( self : Dict , snake_case__ : Dict ): """simple docstring""" A =self.convolution(snake_case__ ) A =self.normalization(snake_case__ ) A =self.activation(snake_case__ ) return hidden_state class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : List[str] , snake_case__ : RegNetConfig ): """simple docstring""" super().__init__() A =RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) A =config.num_channels def _a ( self : str , snake_case__ : Tuple ): """simple docstring""" A =pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) A =self.embedder(snake_case__ ) return hidden_state class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : int = 2 ): """simple docstring""" super().__init__() A =nn.Convad(snake_case__ , snake_case__ , kernel_size=1 , stride=snake_case__ , bias=snake_case__ ) A =nn.BatchNormad(snake_case__ ) def _a ( self : Tuple , snake_case__ : Tensor ): """simple docstring""" A =self.convolution(snake_case__ ) A =self.normalization(snake_case__ ) return hidden_state class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : List[Any] , snake_case__ : int , snake_case__ : int ): """simple docstring""" super().__init__() A =nn.AdaptiveAvgPoolad((1, 1) ) A =nn.Sequential( nn.Convad(snake_case__ , snake_case__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(snake_case__ , snake_case__ , kernel_size=1 ) , nn.Sigmoid() , ) def _a ( self : Tuple , snake_case__ : str ): """simple docstring""" A =self.pooler(snake_case__ ) A =self.attention(snake_case__ ) A =hidden_state * attention return hidden_state class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : List[Any] , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 1 ): """simple docstring""" super().__init__() A =in_channels != out_channels or stride != 1 A =max(1 , out_channels // config.groups_width ) A =( RegNetShortCut(snake_case__ , snake_case__ , stride=snake_case__ ) if should_apply_shortcut else nn.Identity() ) A =nn.Sequential( RegNetConvLayer(snake_case__ , snake_case__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(snake_case__ , snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act ) , RegNetConvLayer(snake_case__ , snake_case__ , kernel_size=1 , activation=snake_case__ ) , ) A =ACTaFN[config.hidden_act] def _a ( self : str , snake_case__ : Optional[Any] ): """simple docstring""" A =hidden_state A =self.layer(snake_case__ ) A =self.shortcut(snake_case__ ) hidden_state += residual A =self.activation(snake_case__ ) return hidden_state class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : str , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 1 ): """simple docstring""" super().__init__() A =in_channels != out_channels or stride != 1 A =max(1 , out_channels // config.groups_width ) A =( RegNetShortCut(snake_case__ , snake_case__ , stride=snake_case__ ) if should_apply_shortcut else nn.Identity() ) A =nn.Sequential( RegNetConvLayer(snake_case__ , snake_case__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(snake_case__ , snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act ) , RegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(snake_case__ , snake_case__ , kernel_size=1 , activation=snake_case__ ) , ) A =ACTaFN[config.hidden_act] def _a ( self : Union[str, Any] , snake_case__ : int ): """simple docstring""" A =hidden_state A =self.layer(snake_case__ ) A =self.shortcut(snake_case__ ) hidden_state += residual A =self.activation(snake_case__ ) return hidden_state class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : Any , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 2 , snake_case__ : int = 2 , ): """simple docstring""" super().__init__() A =RegNetXLayer if config.layer_type == "x" 
else RegNetYLayer A =nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , ) , *[layer(snake_case__ , snake_case__ , snake_case__ ) for _ in range(depth - 1 )] , ) def _a ( self : Optional[int] , snake_case__ : Optional[int] ): """simple docstring""" A =self.layers(snake_case__ ) return hidden_state class UpperCamelCase__( nn.Module ): """simple docstring""" def __init__( self : Dict , snake_case__ : RegNetConfig ): """simple docstring""" super().__init__() A =nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) A =zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case__ , config.depths[1:] ): self.stages.append(RegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ ) ) def _a ( self : List[Any] , snake_case__ : Tensor , snake_case__ : bool = False , snake_case__ : bool = True ): """simple docstring""" A =() if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: A =hidden_states + (hidden_state,) A =stage_module(snake_case__ ) if output_hidden_states: A =hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = RegNetConfig _A = "regnet" _A = "pixel_values" _A = True def _a ( self : Union[str, Any] , snake_case__ : List[Any] ): """simple docstring""" if isinstance(snake_case__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(snake_case__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def _a ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any]=False ): """simple docstring""" if isinstance(snake_case__ , snake_case__ ): A =value __a = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ __a = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Dict , snake_case__ : Tuple ): """simple docstring""" super().__init__(snake_case__ ) A =config A =RegNetEmbeddings(snake_case__ ) A =RegNetEncoder(snake_case__ ) A =nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _a ( self : Tuple , snake_case__ : Tensor , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None ): """simple docstring""" A =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A =return_dict if return_dict is not None else self.config.use_return_dict A =self.embedder(snake_case__ ) A =self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ ) A =encoder_outputs[0] A =self.pooler(snake_case__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : str , snake_case__ : Any ): """simple docstring""" super().__init__(snake_case__ ) A =config.num_labels A =RegNetModel(snake_case__ ) # classification head A =nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _a ( self : str , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): """simple docstring""" A =return_dict if return_dict is not None else self.config.use_return_dict A =self.regnet(snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ ) A =outputs.pooler_output if return_dict else outputs[1] A =self.classifier(snake_case__ ) A =None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: A ="regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): A ="single_label_classification" else: A ="multi_label_classification" if self.config.problem_type == "regression": A =MSELoss() if self.num_labels == 1: A =loss_fct(logits.squeeze() , labels.squeeze() ) else: A =loss_fct(snake_case__ , snake_case__ ) elif self.config.problem_type == "single_label_classification": A =CrossEntropyLoss() A =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif 
self.config.problem_type == "multi_label_classification": A =BCEWithLogitsLoss() A =loss_fct(snake_case__ , snake_case__ ) if not return_dict: A =(logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
689
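The RegNetXLayer/RegNetYLayer classes above follow the standard residual recipe (a main branch plus a shortcut, summed before a shared activation); a stripped-down sketch of that pattern, not the library code:

import torch
from torch import nn

class TinyResidualBlock(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        self.layer = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(channels),
        )
        self.shortcut = nn.Identity()  # a strided 1x1 conv when shapes change
        self.activation = nn.ReLU()

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        hidden_state = hidden_state + self.shortcut(residual)
        return self.activation(hidden_state)

x = torch.randn(1, 8, 16, 16)
assert TinyResidualBlock(8)(x).shape == x.shape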
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { """configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""], """tokenization_roformer""": ["""RoFormerTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""RoFormerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """RoFormerForCausalLM""", """RoFormerForMaskedLM""", """RoFormerForMultipleChoice""", """RoFormerForQuestionAnswering""", """RoFormerForSequenceClassification""", """RoFormerForTokenClassification""", """RoFormerLayer""", """RoFormerModel""", """RoFormerPreTrainedModel""", """load_tf_weights_in_roformer""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRoFormerForCausalLM""", """TFRoFormerForMaskedLM""", """TFRoFormerForMultipleChoice""", """TFRoFormerForQuestionAnswering""", """TFRoFormerForSequenceClassification""", """TFRoFormerForTokenClassification""", """TFRoFormerLayer""", """TFRoFormerModel""", """TFRoFormerPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxRoFormerForMaskedLM""", """FlaxRoFormerForMultipleChoice""", """FlaxRoFormerForQuestionAnswering""", """FlaxRoFormerForSequenceClassification""", """FlaxRoFormerForTokenClassification""", """FlaxRoFormerModel""", """FlaxRoFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, 
FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
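The _LazyModule registration above defers importing the heavy torch/TF/Flax submodules until one of their exported names is actually touched; a minimal sketch of that idea with plain importlib (this is not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module = importlib.import_module(
            "." + self._attr_to_module[attr], self.__name__
        )
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value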
import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __a = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCamelCase_ ( a_ ) ->List[Any]: warnings.warn( "The preprocess method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor.preprocess instead" , a_ , ) if isinstance(a_ , torch.Tensor ): return image elif isinstance(a_ , PIL.Image.Image ): A =[image] if isinstance(image[0] , PIL.Image.Image ): A , A =image[0].size A , A =(x - x % 8 for x in (w, h)) # resize to integer multiple of 8 A =[np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] A =np.concatenate(a_ , axis=0 ) A =np.array(a_ ).astype(np.floataa ) / 255.0 A =image.transpose(0 , 3 , 1 , 2 ) A =2.0 * image - 1.0 A =torch.from_numpy(a_ ) elif isinstance(image[0] , torch.Tensor ): A =torch.cat(a_ , dim=0 ) return image def UpperCamelCase_ ( a_ ) ->Dict: if isinstance(a_ , torch.Tensor ): return mask elif isinstance(a_ , PIL.Image.Image ): A =[mask] if isinstance(mask[0] , PIL.Image.Image ): A , A =mask[0].size A , A =(x - x % 32 for x in (w, h)) # resize to integer multiple of 32 A =[np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask] A =np.concatenate(a_ , axis=0 ) A =mask.astype(np.floataa ) / 255.0 A =0 A =1 A =torch.from_numpy(a_ ) elif isinstance(mask[0] , torch.Tensor ): A =torch.cat(a_ , dim=0 ) return mask class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = 42 _A = 42 def __init__( self : str , snake_case__ : List[str] , snake_case__ : int ): """simple docstring""" super().__init__() self.register_modules(unet=snake_case__ , scheduler=snake_case__ ) @torch.no_grad() def __call__( self : List[str] , snake_case__ : Union[torch.Tensor, PIL.Image.Image] , snake_case__ : Union[torch.Tensor, PIL.Image.Image] , snake_case__ : int = 2_50 , snake_case__ : float = 0.0 , snake_case__ : int = 10 , snake_case__ : int = 10 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ): """simple docstring""" A =image A =_preprocess_image(snake_case__ ) A =original_image.to(device=self.device , dtype=self.unet.dtype ) A =_preprocess_mask(snake_case__ ) A =mask_image.to(device=self.device , dtype=self.unet.dtype ) A =original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch''' f''' size of {batch_size}. 
Make sure the batch size matches the length of the generators.''' ) A =original_image.shape A =randn_tensor(snake_case__ , generator=snake_case__ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(snake_case__ , snake_case__ , snake_case__ , self.device ) A =eta A =self.scheduler.timesteps[0] + 1 A =generator[0] if isinstance(snake_case__ , snake_case__ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual A =self.unet(snake_case__ , snake_case__ ).sample # compute previous image: x_t -> x_t-1 A =self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ).prev_sample else: # compute the reverse: x_t-1 -> x_t A =self.scheduler.undo_step(snake_case__ , snake_case__ , snake_case__ ) A =t A =(image / 2 + 0.5).clamp(0 , 1 ) A =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A =self.numpy_to_pil(snake_case__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case__ )
689
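The image preprocessing above maps 8-bit pixels into the [-1, 1] tensor range the UNet expects; the core normalization in isolation, as a sketch:

import numpy as np
import torch

def to_model_range(image_uint8: np.ndarray) -> torch.Tensor:
    # HWC uint8 in [0, 255] -> NCHW float32 in [-1.0, 1.0]
    image = image_uint8.astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    return torch.from_numpy(2.0 * image - 1.0)

x = to_model_range(np.full((8, 8, 3), 255, dtype=np.uint8))
assert x.shape == (1, 3, 8, 8) and float(x.max()) == 1.0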
import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets __a = """\ @inproceedings{popovic-2015-chrf, title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\", author = \"Popovi{\'c}, Maja\", booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\", month = sep, year = \"2015\", address = \"Lisbon, Portugal\", publisher = \"Association for Computational Linguistics\", url = \"https://aclanthology.org/W15-3049\", doi = \"10.18653/v1/W15-3049\", pages = \"392--395\", } @inproceedings{popovic-2017-chrf, title = \"chr{F}++: words helping character n-grams\", author = \"Popovi{\'c}, Maja\", booktitle = \"Proceedings of the Second Conference on Machine Translation\", month = sep, year = \"2017\", address = \"Copenhagen, Denmark\", publisher = \"Association for Computational Linguistics\", url = \"https://aclanthology.org/W17-4770\", doi = \"10.18653/v1/W17-4770\", pages = \"612--618\", } @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __a = """\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. """ __a = """ Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: 'score' (float): The chrF (chrF++) score, 'char_order' (int): The character n-gram order, 'word_order' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, 'beta' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... lowercase=True) >>> print(results) {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__( datasets.Metric ): """simple docstring""" def _a ( self : Any ): """simple docstring""" if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[ "https://github.com/m-popovic/chrF", ] , ) def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ): """simple docstring""" A =len(references[0] ) if any(len(snake_case__ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A =[[refs[i] for refs in references] for i in range(snake_case__ )] A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A =sb_chrf.corpus_score(snake_case__ , snake_case__ ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
689
1
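The compute method above transposes references from one list per prediction into sacrebleu's one stream per reference position; the reshaping on its own:

predictions = ["pred one", "pred two"]
references = [["ref 1a", "ref 1b"], ["ref 2a", "ref 2b"]]

references_per_prediction = len(references[0])
transformed_references = [
    [refs[i] for refs in references] for i in range(references_per_prediction)
]
assert transformed_references == [["ref 1a", "ref 2a"], ["ref 1b", "ref 2b"]]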
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) __a = getLogger(__name__) def UpperCamelCase_ ( a_ , a_ , a_ , a_ = 8 , a_ = 1024 , a_="val" , a_=None , a_=False , a_="summarization" , a_=None , a_=1 , a_ = None , a_="" , **a_ , ) ->Dict: A =str(a_ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=a_ ) A =Path(a_ ) A =save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(a_ ) A =AutoModelForSeqaSeqLM.from_pretrained(a_ ).cuda() if fpaa: A =model.half() # determine if we need to increase num_beams use_task_specific_params(a_ , a_ ) # update config with task specific params A =generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: A =num_return_sequences A =AutoTokenizer.from_pretrained(a_ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: A =tokenizer.model_max_length if prefix is None: A =prefix or getattr(model.config , "prefix" , "" ) or "" A =SeqaSeqDataset( a_ , a_ , a_ , max_target_length=1024 , type_path=a_ , n_obs=a_ , prefix=a_ , **a_ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. A =ds.make_sortish_sampler(a_ , distributed=a_ , add_extra_examples=a_ , shuffle=a_ ) A =DataLoader(a_ , sampler=a_ , batch_size=a_ , collate_fn=ds.collate_fn ) A =[] for batch in tqdm(a_ ): A =model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=a_ , num_beams=a_ , **a_ , ) A =tokenizer.batch_decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) A =batch["ids"] if num_return_sequences > 1: A =chunks(a_ , a_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(a_ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(a_ , a_ ) return results, sampler.num_replicas def UpperCamelCase_ ( ) ->List[str]: A =argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=a_ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=a_ , help="like facebook/bart-large-cnn,t5-base, etc." 
, default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=a_ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=a_ , default=a_ ) parser.add_argument( "--type_path" , type=a_ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=a_ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=a_ , default=8 , required=a_ , help="batch size" ) parser.add_argument( "--local_rank" , type=a_ , default=-1 , required=a_ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=a_ , default=a_ , required=a_ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=a_ , default=1 , required=a_ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=a_ , default=600 , required=a_ , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=a_ , default=a_ , required=a_ ) parser.add_argument("--tgt_lang" , type=a_ , default=a_ , required=a_ ) parser.add_argument( "--prefix" , type=a_ , required=a_ , default=a_ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) A =time.time() A , A =parser.parse_known_args() A =parse_numeric_n_bool_cl_kwargs(a_ ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) A =Path(args.save_dir + "_tmp" ) Path(a_ ).mkdir(exist_ok=a_ ) # this handles locking. A =list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
A ={} if args.src_lang is not None: A =args.src_lang if args.tgt_lang is not None: A =args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=a_ ) A , A =eval_data_dir( args.data_dir , a_ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=a_ , **a_ , ) if args.local_rank <= 0: A =Path(args.save_dir ) save_dir.mkdir(exist_ok=a_ ) A =gather_results_from_each_node(a_ , a_ , args.sync_timeout ) A =combine_partial_results(a_ ) if args.num_return_sequences > 1: A =save_dir.joinpath("pseudolabel_results.json" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(a_ , a_ ) return A =Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(a_ ) as f: A =[x.rstrip() for x in f.readlines()][: len(a_ )] # Calculate metrics, save metrics, and save _generations.txt A ="translation" in args.task A =calculate_bleu if calc_bleu else calculate_rouge A ="bleu" if calc_bleu else "rouge" A =score_fn(a_ , a_ ) A =len(a_ ) A =time.time() - start_time A =round(runtime / metrics["n_obs"] , 4 ) A =num_replicas # TODO(@stas00): add whatever metadata to metrics A =save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(a_ , a_ , indent=a_ ) print(a_ ) write_txt_file(a_ , save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(a_ , save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(a_ ) def UpperCamelCase_ ( a_ ) ->List: A =[] for partial_result in partial_results: records.extend(a_ ) A =sorted(a_ , key=lambda a_ : x["id"] ) A =[x["pred"] for x in records] return preds def UpperCamelCase_ ( a_ , a_ , a_ ) ->List[Dict[str, List]]: # WAIT FOR lots of .json files A =time.time() logger.info("waiting for all nodes to finish" ) A =None while (time.time() - start_wait) < timeout: A =list(save_dir.glob("rank_*.json" ) ) if len(a_ ) < num_replicas: continue try: # make sure all json files are fully saved A =lmap(a_ , a_ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
689
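When num_return_sequences > 1, the evaluation loop above regroups the flat list of decoded strings into per-example chunks; the generic chunking idiom it relies on, sketched here (not necessarily identical to the repo's utils.chunks):

def chunks(seq, n):
    # yield successive n-sized chunks from seq
    for i in range(0, len(seq), n):
        yield seq[i : i + n]

decoded = ["a1", "a2", "b1", "b2"]  # 2 examples x 2 returned sequences
assert list(chunks(decoded, 2)) == [["a1", "a2"], ["b1", "b2"]]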
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __a = { """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTBigCodeForSequenceClassification""", """GPTBigCodeForTokenClassification""", """GPTBigCodeForCausalLM""", """GPTBigCodeModel""", """GPTBigCodePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
689
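The underlying property: a string permutes into a palindrome exactly when at most one character has an odd count. Quick checks against the counter-based version above:

assert can_string_be_rearranged_as_palindrome_counter("race car")   # "racecar"
assert not can_string_be_rearranged_as_palindrome_counter("python") # six odd counts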
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class UpperCamelCase__: """simple docstring""" def __init__( self : List[str] , snake_case__ : Optional[int] , ): """simple docstring""" A =parent A =13 A =7 A =True A =True A =True A =True A =True A =False A =False A =False A =2 A =99 A =0 A =32 A =2 A =4 A =0.1 A =0.1 A =5_12 A =16 A =2 A =0.02 A =3 A =4 A ="last" A =True A =None A =0 def _a ( self : Optional[Any] ): """simple docstring""" A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) A =None if self.use_input_lengths: A =( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A =None if self.use_token_type_ids: A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A =None A =None A =None if self.use_labels: A =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) A =ids_tensor([self.batch_size] , self.num_choices ) A =FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ): """simple docstring""" A =TFFlaubertModel(config=snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} A =model(snake_case__ ) A =[input_ids, input_mask] A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ): """simple docstring""" A =TFFlaubertWithLMHeadModel(snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} A 
=model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , ): """simple docstring""" A =TFFlaubertForQuestionAnsweringSimple(snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths} A =model(snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , ): """simple docstring""" A =TFFlaubertForSequenceClassification(snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths} A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , ): """simple docstring""" A =self.num_labels A =TFFlaubertForTokenClassification(config=snake_case__ ) A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , ): """simple docstring""" A =self.num_choices A =TFFlaubertForMultipleChoice(config=snake_case__ ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A ={ "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _a ( self : Any ): """simple docstring""" A =self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) =config_and_inputs A ={ "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) _A = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _A = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, 
"question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) _A = False _A = False def _a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _a ( self : Optional[int] ): """simple docstring""" A =TFFlaubertModelTester(self ) A =ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def _a ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : str ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def _a ( self : Union[str, Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ ) def _a ( self : Tuple ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ ) @slow def _a ( self : Tuple ): """simple docstring""" for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A =TFFlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_tf @require_sentencepiece @require_tokenizers class UpperCamelCase__( unittest.TestCase ): """simple docstring""" @slow def _a ( self : Tuple ): """simple docstring""" A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) A =tf.convert_to_tensor( [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" A =model(snake_case__ )[0] A =tf.TensorShape((1, 8, 5_12) ) self.assertEqual(output.shape , snake_case__ ) # compare the actual values for a slice. A =tf.convert_to_tensor( [ [ [-1.8_768_773, -1.566_555, 0.27_072_418], [-1.6_920_038, -0.5_873_505, 1.9_329_599], [-2.9_563_985, -1.6_993_835, 1.7_972_052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
689
1
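# A minimal sketch of the shape check the test suite above exercises, assuming
# `transformers` and TensorFlow are installed; the tiny config values here are
# illustrative and do not correspond to any pretrained checkpoint.
import tensorflow as tf
from transformers import FlaubertConfig, TFFlaubertModel

config = FlaubertConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4)
model = TFFlaubertModel(config)
input_ids = tf.constant([[1, 5, 7, 2]], dtype=tf.int32)  # (batch_size=1, seq_length=4)
outputs = model(input_ids)
assert outputs.last_hidden_state.shape == (1, 4, config.emb_dim)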
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __a = 1_6 __a = 3_2 def UpperCamelCase_ ( a_ , a_ = 16 ) ->Dict: A =AutoTokenizer.from_pretrained("bert-base-cased" ) A =load_dataset("glue" , "mrpc" ) def tokenize_function(a_ ): # max_length=None => use the model max length (it's actually the default) A =tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a_ , max_length=a_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A =datasets.map( a_ , batched=a_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A =tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(a_ ): # On TPU it's best to pad everything to the same length or training will be very slow. A =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A =16 elif accelerator.mixed_precision != "no": A =8 else: A =None return tokenizer.pad( a_ , padding="longest" , max_length=a_ , pad_to_multiple_of=a_ , return_tensors="pt" , ) # Instantiate dataloaders. 
A =DataLoader( tokenized_datasets["train"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) A =DataLoader( tokenized_datasets["validation"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __a = mocked_dataloaders # noqa: F811 def UpperCamelCase_ ( a_ , a_ ) ->Dict: # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , a_ ) == "1": A =2 # Initialize accelerator A =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A =config["lr"] A =int(config["num_epochs"] ) A =int(config["seed"] ) A =int(config["batch_size"] ) A =evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation A =1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A =batch_size // MAX_GPU_BATCH_SIZE A =MAX_GPU_BATCH_SIZE set_seed(a_ ) A , A =get_dataloaders(a_ , a_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A =AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=a_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A =model.to(accelerator.device ) # Instantiate optimizer A =AdamW(params=model.parameters() , lr=a_ ) # Instantiate scheduler A =get_linear_schedule_with_warmup( optimizer=a_ , num_warmup_steps=100 , num_training_steps=(len(a_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A , A , A , A , A =accelerator.prepare( a_ , a_ , a_ , a_ , a_ ) # Now we train the model for epoch in range(a_ ): model.train() for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) A =model(**a_ ) A =outputs.loss A =loss / gradient_accumulation_steps accelerator.backward(a_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() A =0 for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): A =model(**a_ ) A =outputs.logits.argmax(dim=-1 ) A , A =accelerator.gather((predictions, batch["labels"]) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(a_ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples A =predictions[: len(eval_dataloader.dataset ) - samples_seen] A =references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=a_ , references=a_ , ) A =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , a_ ) def UpperCamelCase_ ( ) ->List[str]: A =argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=a_ , default=a_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) A =parser.parse_args() A ={"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(a_ , a_ ) if __name__ == "__main__": main()
689
from __future__ import annotations def UpperCamelCase_ ( a_ ) ->None: create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] ) def UpperCamelCase_ ( a_ , a_ , a_ , a_ , ) ->None: if index == len(a_ ): print(a_ ) return for i in range(len(a_ ) ): if not index_used[i]: current_sequence.append(sequence[i] ) A =True create_state_space_tree(a_ , a_ , index + 1 , a_ ) current_sequence.pop() A =False __a = [3, 1, 2, 4] generate_all_permutations(sequence) __a = ["A", "B", "C"] generate_all_permutations(sequence_a)
689
1
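# A small, self-contained illustration of the last-batch truncation performed in
# the evaluation loop above: in a distributed run, `gather` pads the final batch
# so every process contributes the same number of rows, and the extras must be
# dropped before computing metrics. Plain lists stand in for gathered tensors.
dataset_len = 10
world_size = 4
batch_size = 3  # 4 processes x 3 rows = 12 gathered rows, but only 10 are real

gathered = list(range(world_size * batch_size))  # last 2 rows are duplicates
samples_seen = 0
kept = []
for step in range(1):  # pretend this is the last eval step
    batch = gathered
    # last batch: truncate to the number of samples actually remaining
    batch = batch[: dataset_len - samples_seen]
    kept.extend(batch)
    samples_seen += len(batch)
assert len(kept) == dataset_len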
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
689
import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCamelCase__( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _a ( self : Tuple ): """simple docstring""" torch.manual_seed(0 ) A =UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return model @property def _a ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) A =UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , ) return model @property def _a ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) A =AutoencoderKL( sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , ) A =UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return vqvae, unet @slow def _a ( self : int ): """simple docstring""" A ="cpu" # ensure determinism for the device-dependent torch.Generator A =Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) A =DDPMScheduler() A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ ) A =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) A =torch.Generator(device=snake_case__ ).manual_seed(42 ) A =pipe(generator=snake_case__ , steps=4 ) A =output.audios[0] A =output.images[0] A =torch.Generator(device=snake_case__ ).manual_seed(42 ) A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ ) A =output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10] A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10] A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 A =Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) A =DDIMScheduler() A =self.dummy_vqvae_and_unet A =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ ) A =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) np.random.seed(0 ) A =np.random.uniform(-1 , 1 , 
((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) A =torch.Generator(device=snake_case__ ).manual_seed(42 ) A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 ) A =output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10] A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 A =self.dummy_unet_condition A =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ ) A =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) np.random.seed(0 ) A =torch.rand((1, 1, 10) ) A =pipe(generator=snake_case__ , encoding=snake_case__ ) A =output.images[0] A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10] A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class UpperCamelCase__( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Union[str, Any] ): """simple docstring""" A =torch_device A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" ) A =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) A =torch.Generator(device=snake_case__ ).manual_seed(42 ) A =pipe(generator=snake_case__ ) A =output.audios[0] A =output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10] A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
689
1
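# The multistep blend used in `step` above is a classic Adams-Bashforth
# combination of the last few stored estimates; here is the same coefficient
# logic on plain floats (a sketch, detached from any scheduler state).
def adams_bashforth_blend(ets: list) -> float:
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (1 / 24) * (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4])


# a constant history is a fixed point: the coefficients of each rule sum to 1
assert adams_bashforth_blend([1.0, 1.0, 1.0, 1.0]) == 1.0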
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __a = logging.get_logger(__name__) # pylint: disable=invalid-name __a = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)[\"depth\"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline(\"depth-estimation\") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to(\"cuda\") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to(\"cuda\") >>> img = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/cat.png\" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\") >>> prompt = \"A robot, 4k photo\" >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\" >>> generator = torch.Generator(device=\"cuda\").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save(\"robot_cat.png\") ``` """ def UpperCamelCase_ ( a_ , a_ , a_=8 ) ->str: A =height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 A =width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Union[str, Any] , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ): """simple docstring""" super().__init__() self.register_modules( unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , ) A =2 ** (len(self.movq.config.block_out_channels ) - 1) def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" if latents is None: A =randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) A =latents.to(snake_case__ ) A =latents * scheduler.init_noise_sigma return latents def _a ( self : Dict , snake_case__ : List[Any]=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) A =torch.device(f'''cuda:{gpu_id}''' ) A =[ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(snake_case__ , snake_case__ ) def _a ( self : List[Any] , snake_case__ : Tuple=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) A =torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=snake_case__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) A =None for cpu_offloaded_model in [self.unet, self.movq]: A , A =cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ ) # We'll offload the last model manually. 
A =hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _a ( self : Tuple ): """simple docstring""" if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(snake_case__ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(snake_case__ ) def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : torch.FloatTensor , snake_case__ : int = 5_12 , snake_case__ : int = 5_12 , snake_case__ : int = 1_00 , snake_case__ : float = 4.0 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ): """simple docstring""" A =self._execution_device A =guidance_scale > 1.0 if isinstance(snake_case__ , snake_case__ ): A =torch.cat(snake_case__ , dim=0 ) if isinstance(snake_case__ , snake_case__ ): A =torch.cat(snake_case__ , dim=0 ) if isinstance(snake_case__ , snake_case__ ): A =torch.cat(snake_case__ , dim=0 ) A =image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: A =image_embeds.repeat_interleave(snake_case__ , dim=0 ) A =negative_image_embeds.repeat_interleave(snake_case__ , dim=0 ) A =hint.repeat_interleave(snake_case__ , dim=0 ) A =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ ) A =torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ ) self.scheduler.set_timesteps(snake_case__ , device=snake_case__ ) A =self.scheduler.timesteps A =self.movq.config.latent_channels A , A =downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor ) # create initial latent A =self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , snake_case__ , snake_case__ , snake_case__ , self.scheduler , ) for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the latents if we are doing classifier free guidance A =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A ={"image_embeds": image_embeds, "hint": hint} A =self.unet( sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0] if do_classifier_free_guidance: A , A =noise_pred.split(latents.shape[1] , dim=1 ) A , A =noise_pred.chunk(2 ) A , A =variance_pred.chunk(2 ) A =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) A =torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): A , A =noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 A =self.scheduler.step( snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0] # post-processing A =self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", 
"pil"]: A =image * 0.5 + 0.5 A =image.clamp(0 , 1 ) A =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A =self.numpy_to_pil(snake_case__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case__ )
689
import os import sys import unittest __a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __a = os.path.join(git_repo_path, """src""", """diffusers""") class UpperCamelCase__( unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A =find_backend(" if not is_torch_available():" ) self.assertEqual(snake_case__ , "torch" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") A =find_backend(" if not (is_torch_available() and is_transformers_available()):" ) self.assertEqual(snake_case__ , "torch_and_transformers" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") A =find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(snake_case__ , "torch_and_transformers_and_onnx" ) def _a ( self : List[Any] ): """simple docstring""" A =read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , snake_case__ ) self.assertIn("torch_and_transformers" , snake_case__ ) self.assertIn("flax_and_transformers" , snake_case__ ) self.assertIn("torch_and_transformers_and_onnx" , snake_case__ ) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"] ) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] ) self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] ) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] ) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] ) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] ) def _a ( self : Dict ): """simple docstring""" A =create_dummy_object("CONSTANT" , "'torch'" ) self.assertEqual(snake_case__ , "\nCONSTANT = None\n" ) A =create_dummy_object("function" , "'torch'" ) self.assertEqual( snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" A =create_dummy_object("FakeClass" , "'torch'" ) self.assertEqual(snake_case__ , snake_case__ ) def _a ( self : Tuple ): """simple docstring""" A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n 
requires_backends(cls, [\"torch\"])\n" A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] , snake_case__ )
689
1
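# The dummy files checked above rely on a metaclass pattern like this one: a
# placeholder class that raises as soon as it is instantiated. This is a
# simplified sketch of the idea, not the exact `DummyObject`/`requires_backends`
# pair from the library.
class DummyObject(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the {cls._backends} backend(s).")


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]


try:
    FakeClass()
except ImportError as err:
    print(err)  # FakeClass requires the ['torch'] backend(s).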
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
689
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
689
1
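# A quick numeric check of the binomial formula above for P(X = 2) with n = 4
# trials and p = 0.75:
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375
from math import comb

probability = comb(4, 2) * 0.75**2 * 0.25**2
assert abs(probability - 0.2109375) < 1e-12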
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("a should be a positive number")

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
689
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {"""vocab_file""": """vocab.txt"""} __a = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } __a = { """openbmb/cpm-ant-10b""": 1_0_2_4, } def UpperCamelCase_ ( a_ ) ->List[Any]: A =collections.OrderedDict() with open(a_ , "r" , encoding="utf-8" ) as reader: A =reader.readlines() for index, token in enumerate(a_ ): A =token.rstrip("\n" ) A =index return vocab class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ): """simple docstring""" A =vocab A =unk_token A =max_input_chars_per_word def _a ( self : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" A =list(snake_case__ ) if len(snake_case__ ) > self.max_input_chars_per_word: return [self.unk_token] A =0 A =[] while start < len(snake_case__ ): A =len(snake_case__ ) A =None while start < end: A ="".join(chars[start:end] ) if substr in self.vocab: A =substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(snake_case__ ) A =end return sub_tokens class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = ["input_ids", "attention_mask"] _A = False def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ): """simple docstring""" requires_backends(self , ["jieba"] ) super().__init__( bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , ) A =bod_token A =eod_token A =load_vocab(snake_case__ ) A =self.encoder[space_token] A =self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) A ={v: k for k, v in self.encoder.items()} A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _a ( self : Dict ): """simple docstring""" return self.encoder[self.bod_token] @property def _a ( self : List[str] ): """simple docstring""" return self.encoder[self.eod_token] @property def _a ( self : Any ): """simple docstring""" return self.encoder["\n"] @property def _a ( self : List[str] ): """simple docstring""" return len(self.encoder ) def _a ( self : Tuple ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : Tuple , snake_case__ : int ): """simple docstring""" A =[] for x in jieba.cut(snake_case__ , cut_all=snake_case__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) ) return output_tokens def _a ( self : List[Any] , snake_case__ : List[Any] , 
**snake_case__ : str ): """simple docstring""" A =[i for i in token_ids if i >= 0] A =[ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(snake_case__ , **snake_case__ ) def _a ( self : List[Any] , snake_case__ : int ): """simple docstring""" return token in self.encoder def _a ( self : Optional[Any] , snake_case__ : List[str] ): """simple docstring""" return "".join(snake_case__ ) def _a ( self : List[Any] , snake_case__ : Optional[Any] ): """simple docstring""" return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def _a ( self : Dict , snake_case__ : Optional[int] ): """simple docstring""" return self.decoder.get(snake_case__ , self.unk_token ) def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" if os.path.isdir(snake_case__ ): A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: A =(filename_prefix + "-" if filename_prefix else "") + save_directory A =0 if " " in self.encoder: A =self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: A =self.encoder["\n"] del self.encoder["\n"] A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) with open(snake_case__ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) A =token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ): """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is not None: return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) return [1] + ([0] * len(snake_case__ ))
689
1
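# The WordpieceTokenizer above is a greedy longest-match-first scan; the same
# idea in a few self-contained lines over a toy vocabulary (illustrative only,
# not the CPM-Ant vocabulary or its jieba pre-segmentation).
def longest_match_tokenize(text: str, vocab: set, unk: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        while end > start and text[start:end] not in vocab:
            end -= 1
        if end == start:  # no match at all: emit <unk> and skip one character
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens


print(longest_match_tokenize("unhappy", {"un", "unhap", "happy", "py"}))
# ['unhap', 'py']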
__a = """Alexander Joslin""" import operator as op from .stack import Stack def UpperCamelCase_ ( a_ ) ->int: A ={"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub} A =Stack() A =Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(a_ ) ) elif i in operators: # RULE 2 operator_stack.push(a_ ) elif i == ")": # RULE 4 A =operator_stack.peek() operator_stack.pop() A =operand_stack.peek() operand_stack.pop() A =operand_stack.peek() operand_stack.pop() A =operators[opr](a_ , a_ ) operand_stack.push(a_ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": __a = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
689
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
689
1
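# A brute-force cross-check of the largest-prime-factor routine above on a small
# input: 13195 = 5 * 7 * 13 * 29, so the answer should be 29 (sketch only).
def largest_prime_factor_naive(n: int) -> int:
    factor, i = 1, 2
    while i * i <= n:
        while n % i == 0:
            factor, n = i, n // i
        i += 1
    return max(factor, n) if n > 1 else factor


assert largest_prime_factor_naive(13195) == 29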
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
689
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = "Wav2Vec2FeatureExtractor" _A = "AutoTokenizer" def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" super().__init__(snake_case__ , snake_case__ ) A =self.feature_extractor A =False @classmethod def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ): """simple docstring""" try: return super().from_pretrained(snake_case__ , **snake_case__ ) except OSError: warnings.warn( f'''Loading a tokenizer inside {cls.__name__} from a config that does not''' " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , snake_case__ , ) A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ ) A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ ) return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ ) def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*snake_case__ , **snake_case__ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) A =kwargs.pop("raw_speech" ) else: A =kwargs.pop("audio" , snake_case__ ) A =kwargs.pop("sampling_rate" , snake_case__ ) A =kwargs.pop("text" , snake_case__ ) if len(snake_case__ ) > 0: A =args[0] A =args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ ) if text is not None: A =self.tokenizer(snake_case__ , **snake_case__ ) if text is None: return inputs elif audio is None: return encodings else: A =encodings["input_ids"] return inputs def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*snake_case__ , **snake_case__ ) A =kwargs.pop("input_features" , snake_case__ ) A =kwargs.pop("labels" , snake_case__ ) if len(snake_case__ ) > 0: A =args[0] A =args[1:] if input_features is not None: A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ ) if labels is not None: A =self.tokenizer.pad(snake_case__ , **snake_case__ ) if labels is None: return input_features elif input_features is None: return labels else: A =labels["input_ids"] return input_features def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ): """simple docstring""" return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ): """simple docstring""" return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @contextmanager def _a ( self : int ): """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) A =True A =self.tokenizer yield A =self.feature_extractor A =False
689
1
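# The deprecated `as_target_processor` above is just a context manager that
# temporarily swaps which component handles `__call__`; here is the pattern in
# isolation, a sketch with stand-in callables rather than the real
# feature-extractor/tokenizer classes.
from contextlib import contextmanager


class SwappingProcessor:
    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer
        self.current_processor = feature_extractor

    def __call__(self, data):
        return self.current_processor(data)

    @contextmanager
    def as_target_processor(self):
        self.current_processor = self.tokenizer
        try:
            yield
        finally:
            self.current_processor = self.feature_extractor


proc = SwappingProcessor(lambda d: ("audio", d), lambda d: ("text", d))
assert proc("x")[0] == "audio"
with proc.as_target_processor():
    assert proc("y")[0] == "text"
assert proc("z")[0] == "audio"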
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
689
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
689
1
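# The probe position used by interpolation search above, evaluated once by hand:
# for collection = [10, 30, 40, 45, 50, 66, 77, 93] and item = 45,
# point = 0 + (45 - 10) * (7 - 0) // (93 - 10) = 245 // 83 = 2, i.e. the first
# probe lands at index 2 (value 40), already close to the target at index 3.
collection = [10, 30, 40, 45, 50, 66, 77, 93]
item, left, right = 45, 0, len(collection) - 1
point = left + (item - collection[left]) * (right - left) // (
    collection[right] - collection[left]
)
assert point == 2 and collection[point] == 40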
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
689
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split())
    .decode("utf-8")
    .split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
689
1
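# A brute-force cross-check of the partition table above: it counts the ways to
# write m as a sum of parts no larger than m - 1, i.e. as a sum of at least two
# positive integers. For m = 5 there are 6 such partitions
# (4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).
def count_sums_naive(m: int, max_part: int) -> int:
    if m == 0:
        return 1
    return sum(count_sums_naive(m - p, p) for p in range(1, min(m, max_part) + 1))


assert count_sums_naive(5, 4) == 6  # parts capped at m - 1 excludes the trivial "5"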
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __a = logging.getLogger(__name__) def UpperCamelCase_ ( a_ , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = False , ) ->Tuple: A =bnb_quantization_config.load_in_abit A =bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." ) A =[] # custom device map if isinstance(a_ , a_ ) and len(device_map.keys() ) > 1: A =[key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: A =get_keys_to_not_convert(a_ ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(a_ ) A =bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: A =[] A =bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(a_ ) # compatibility with peft A =load_in_abit A =load_in_abit A =get_parameter_device(a_ ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." ) A =replace_with_bnb_layers(a_ , a_ , modules_to_not_convert=a_ ) # convert param to the right dtype A =bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: A =name.replace(".weight" , "" ).replace(".bias" , "" ) A =getattr(a_ , a_ , a_ ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(a_ ): param.to(a_ ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' "We move the model to cuda." 
) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): A =replace_with_bnb_layers( a_ , a_ , modules_to_not_convert=a_ ) A =get_quantized_model_device_map( a_ , a_ , a_ , max_memory=a_ , no_split_module_classes=a_ , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): A =True A =any(x in list(device_map.values() ) for x in ["cpu", "disk"] ) load_checkpoint_in_model( a_ , a_ , a_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=a_ , offload_state_dict=a_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(a_ , device_map=a_ , offload_dir=a_ ) def UpperCamelCase_ ( a_ , a_ , a_=None , a_=None , a_=None ) ->Optional[Any]: if device_map is None: if torch.cuda.is_available(): A ={"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." ) if isinstance(a_ , a_ ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) A ={} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) A ={} A =special_dtypes A =no_split_module_classes A =bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": A =get_balanced_memory( a_ , low_zero=(device_map == "balanced_low_0") , max_memory=a_ , **a_ , ) A =max_memory A =infer_auto_device_map(a_ , **a_ ) if isinstance(a_ , a_ ): # check if don't have any quantized module on the cpu A =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules A ={ key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def UpperCamelCase_ ( a_ , a_ , a_=None , a_=None ) ->Tuple: if modules_to_not_convert is None: A =[] A , A =_replace_with_bnb_layers( a_ , a_ , a_ , a_ ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." 
" Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def UpperCamelCase_ ( a_ , a_ , a_=None , a_=None , ) ->Union[str, Any]: A =False for name, module in model.named_children(): if current_key_name is None: A =[] current_key_name.append(a_ ) if isinstance(a_ , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` A =".".join(a_ ) A =True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: A =False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: A =bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=a_ , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: A =bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False" ) A =module.weight.data if module.bias is not None: A =module.bias.data bnb_module.requires_grad_(a_ ) setattr(a_ , a_ , a_ ) A =True if len(list(module.children() ) ) > 0: A , A =_replace_with_bnb_layers( a_ , a_ , a_ , a_ ) A =has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def UpperCamelCase_ ( a_ ) ->Dict: # Create a copy of the model with init_empty_weights(): A =deepcopy(a_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` A =find_tied_parameters(a_ ) # For compatibility with Accelerate < 0.18 if isinstance(a_ , a_ ): A =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A =sum(a_ , [] ) A =len(a_ ) > 0 # Check if it is a base model A =False if hasattr(a_ , "base_model_prefix" ): A =not hasattr(a_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A =list(model.named_children() ) A =[list_modules[-1][0]] # add last module together with tied weights A =set(a_ ) - set(a_ ) A =list(set(a_ ) ) + list(a_ ) # remove ".weight" from the keys A =[".weight", ".bias"] A =[] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A =name.replace(a_ , "" ) filtered_module_names.append(a_ ) return filtered_module_names def UpperCamelCase_ ( a_ ) ->List[Any]: for m in model.modules(): if isinstance(a_ , bnb.nn.Linearabit ): return True return False def UpperCamelCase_ ( a_ ) ->List[str]: return next(parameter.parameters() ).device def UpperCamelCase_ ( a_ , a_ , a_ , a_ , a_ , a_ , a_ ) ->Optional[int]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(a_ , a_ , 0 , dtype=a_ , value=a_ ) A =param_name A =model if "." in tensor_name: A =tensor_name.split("." 
) for split in splits[:-1]: A =getattr(a_ , a_ ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) A =new_module A =splits[-1] # offload weights A =False offload_weight(module._parameters[tensor_name] , a_ , a_ , index=a_ ) if hasattr(module._parameters[tensor_name] , "SCB" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , a_ , index=a_ , ) else: offload_weight(a_ , a_ , a_ , index=a_ ) offload_weight(a_ , param_name.replace("weight" , "SCB" ) , a_ , index=a_ ) set_module_tensor_to_device(a_ , a_ , "meta" , dtype=a_ , value=torch.empty(*param.size() ) )
689
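The record above converts a model for bitsandbytes quantization by recursively walking model.named_children() and swapping eligible nn.Linear modules for quantized replacements. A minimal sketch of that traversal pattern, with a hypothetical FakeQuantLinear standing in for the actual bnb layer:

import torch.nn as nn


class FakeQuantLinear(nn.Linear):
    # Hypothetical stand-in for a quantized layer such as bnb.nn.Linear8bitLt;
    # it exists only to make the traversal below runnable.
    pass


def replace_linear(model: nn.Module, skip: set, prefix: str = "") -> nn.Module:
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            new_layer = FakeQuantLinear(child.in_features, child.out_features, child.bias is not None)
            new_layer.weight.data = child.weight.data  # keep the pretrained weights
            if child.bias is not None:
                new_layer.bias.data = child.bias.data
            setattr(model, name, new_layer)
        else:
            replace_linear(child, skip, full_name)
    return model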
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = { """configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""MobileViTFeatureExtractor"""] __a = ["""MobileViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MobileViTForImageClassification""", """MobileViTForSemanticSegmentation""", """MobileViTModel""", """MobileViTPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFMobileViTForImageClassification""", """TFMobileViTForSemanticSegmentation""", """TFMobileViTModel""", """TFMobileViTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
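The record above is a lazy __init__: _LazyModule defers the heavy torch/tensorflow imports until an attribute is first accessed. A rough sketch of the same idea using PEP 562's module-level __getattr__, with assumed submodule names; this is a simplification, not the real _LazyModule:

import importlib

_SUBMODULES = {"configuration_mobilevit", "modeling_mobilevit"}  # names assumed from the file above


def __getattr__(name):  # called only when `name` is not found normally
    if name in _SUBMODULES:
        return importlib.import_module(f".{name}", __name__)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")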
__a = [
    """Audio""",
    """Array2D""",
    """Array3D""",
    """Array4D""",
    """Array5D""",
    """ClassLabel""",
    """Features""",
    """Sequence""",
    """Value""",
    """Image""",
    """Translation""",
    """TranslationVariableLanguages""",
]

from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
689
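These feature types form datasets' public typing API. A small usage sketch, assuming the standard top-level re-exports:

from datasets import Array2D, ClassLabel, Features, Value

features = Features(
    {
        "text": Value("string"),
        "label": ClassLabel(names=["neg", "pos"]),
        "matrix": Array2D(shape=(2, 2), dtype="float32"),
    }
)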
def nand_gate(input_1: int, input_2: int) -> int:
    # NAND is 0 only when both inputs are 1.
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
689
1
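Since NAND is functionally complete, the nand_gate above suffices to build every other gate. A short sketch:

def not_gate(a: int) -> int:
    return nand_gate(a, a)


def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))


def or_gate(a: int, b: int) -> int:
    return nand_gate(not_gate(a), not_gate(b))


assert [or_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 1]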
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = "naver-clova-ix/donut-base-finetuned-docvqa" _A = ( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) _A = "document_qa" _A = AutoProcessor _A = VisionEncoderDecoderModel _A = ["image", "text"] _A = ["text"] def __init__( self : Union[str, Any] , *snake_case__ : int , **snake_case__ : List[Any] ): """simple docstring""" if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." ) super().__init__(*snake_case__ , **snake_case__ ) def _a ( self : List[str] , snake_case__ : "Image" , snake_case__ : str ): """simple docstring""" A ="<s_docvqa><s_question>{user_input}</s_question><s_answer>" A =task_prompt.replace("{user_input}" , snake_case__ ) A =self.pre_processor.tokenizer( snake_case__ , add_special_tokens=snake_case__ , return_tensors="pt" ).input_ids A =self.pre_processor(snake_case__ , return_tensors="pt" ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _a ( self : Tuple , snake_case__ : List[str] ): """simple docstring""" return self.model.generate( inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=snake_case__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=snake_case__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=snake_case__ , ).sequences def _a ( self : Optional[Any] , snake_case__ : Optional[int] ): """simple docstring""" A =self.pre_processor.batch_decode(snake_case__ )[0] A =sequence.replace(self.pre_processor.tokenizer.eos_token , "" ) A =sequence.replace(self.pre_processor.tokenizer.pad_token , "" ) A =re.sub(R"<.*?>" , "" , snake_case__ , count=1 ).strip() # remove first task start token A =self.pre_processor.tokenajson(snake_case__ ) return sequence["answer"]
689
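The tool above injects the user's question into Donut's structured DocVQA prompt before tokenizing; the decoder then completes the <s_answer> span. The prompt construction in isolation:

task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
prompt = task_prompt.replace("{user_input}", "What is the total amount?")
print(prompt)
# <s_docvqa><s_question>What is the total amount?</s_question><s_answer>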
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
689
1
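All three variants count ordered combinations (compositions); the memoized and bottom-up versions run in O(target * len(array)) instead of the plain recursion's exponential time. A quick sanity check:

assert combination_sum_iv(3, [1, 2, 5], 5) == 9
# 5 itself, plus the 8 orderings built from 1s and 2s
# (1+1+1+1+1, the orderings of 1+1+1+2, the orderings of 1+2+2, ...)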
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class UpperCamelCase__: """simple docstring""" def __init__( self : List[str] , snake_case__ : Optional[int] , ): """simple docstring""" A =parent A =13 A =7 A =True A =True A =True A =True A =True A =False A =False A =False A =2 A =99 A =0 A =32 A =2 A =4 A =0.1 A =0.1 A =5_12 A =16 A =2 A =0.02 A =3 A =4 A ="last" A =True A =None A =0 def _a ( self : Optional[Any] ): """simple docstring""" A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) A =None if self.use_input_lengths: A =( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A =None if self.use_token_type_ids: A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A =None A =None A =None if self.use_labels: A =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) A =ids_tensor([self.batch_size] , self.num_choices ) A =FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ): """simple docstring""" A =TFFlaubertModel(config=snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} A =model(snake_case__ ) A =[input_ids, input_mask] A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ): """simple docstring""" A =TFFlaubertWithLMHeadModel(snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} A 
=model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , ): """simple docstring""" A =TFFlaubertForQuestionAnsweringSimple(snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths} A =model(snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , ): """simple docstring""" A =TFFlaubertForSequenceClassification(snake_case__ ) A ={"input_ids": input_ids, "lengths": input_lengths} A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , ): """simple docstring""" A =self.num_labels A =TFFlaubertForTokenClassification(config=snake_case__ ) A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , ): """simple docstring""" A =self.num_choices A =TFFlaubertForMultipleChoice(config=snake_case__ ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) A ={ "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _a ( self : Any ): """simple docstring""" A =self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) =config_and_inputs A ={ "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) _A = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _A = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, 
"question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) _A = False _A = False def _a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _a ( self : Optional[int] ): """simple docstring""" A =TFFlaubertModelTester(self ) A =ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def _a ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : str ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def _a ( self : Union[str, Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ ) def _a ( self : Tuple ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ ) @slow def _a ( self : Tuple ): """simple docstring""" for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A =TFFlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_tf @require_sentencepiece @require_tokenizers class UpperCamelCase__( unittest.TestCase ): """simple docstring""" @slow def _a ( self : Tuple ): """simple docstring""" A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) A =tf.convert_to_tensor( [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" A =model(snake_case__ )[0] A =tf.TensorShape((1, 8, 5_12) ) self.assertEqual(output.shape , snake_case__ ) # compare the actual values for a slice. A =tf.convert_to_tensor( [ [ [-1.8_768_773, -1.566_555, 0.27_072_418], [-1.6_920_038, -0.5_873_505, 1.9_329_599], [-2.9_563_985, -1.6_993_835, 1.7_972_052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
689
from __future__ import annotations import math def UpperCamelCase_ ( a_ , a_ ) ->float: A =u for i in range(1 , a_ ): A =temp * (u - i) return temp def UpperCamelCase_ ( ) ->None: A =int(input("enter the numbers of values: " ) ) A =[] for _ in range(a_ ): y.append([] ) for i in range(a_ ): for j in range(a_ ): y[i].append(a_ ) A =0 print("enter the values of parameters in a list: " ) A =list(map(a_ , input().split() ) ) print("enter the values of corresponding parameters: " ) for i in range(a_ ): A =float(input() ) A =int(input("enter the value to interpolate: " ) ) A =(value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , a_ ): for j in range(n - i ): A =y[j + 1][i - 1] - y[j][i - 1] A =y[0][0] for i in range(1 , a_ ): summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ ) print(f'''the value at {value} is {summ}''' ) if __name__ == "__main__": main()
689
1
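The record above implements Newton's forward-difference interpolation: ucal(u, p) accumulates the falling product u(u-1)...(u-p+1), the double loop builds the forward-difference table, and the final sum evaluates

P(x) \approx y_0 + \sum_{p=1}^{n-1} \frac{u(u-1)\cdots(u-p+1)}{p!}\,\Delta^{p} y_0,
\qquad u = \frac{x - x_0}{x_1 - x_0}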
import json import os import torch from diffusers import UNetaDModel os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True) def UpperCamelCase_ ( a_ ) ->Optional[int]: if hor == 128: A =("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") A =(32, 128, 256) A =("UpResnetBlock1D", "UpResnetBlock1D") elif hor == 32: A =("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") A =(32, 64, 128, 256) A =("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") A =torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) A =model.state_dict() A ={ "down_block_types": down_block_types, "block_out_channels": block_out_channels, "up_block_types": up_block_types, "layers_per_block": 1, "use_timestep_embedding": True, "out_block_type": "OutConv1DBlock", "norm_num_groups": 8, "downsample_each_block": False, "in_channels": 14, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "flip_sin_to_cos": False, "freq_shift": 1, "sample_size": 6_5536, "mid_block_type": "MidResTemporalBlock1D", "act_fn": "mish", } A =UNetaDModel(**a_ ) print(f'''length of state dict: {len(state_dict.keys() )}''' ) print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) A =dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): A =state_dict.pop(a_ ) hf_value_function.load_state_dict(a_ ) torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , "w" ) as f: json.dump(a_ , a_ ) def UpperCamelCase_ ( ) ->Optional[int]: A ={ "in_channels": 14, "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), "up_block_types": (), "out_block_type": "ValueFunction", "mid_block_type": "ValueFunctionMidBlock1D", "block_out_channels": (32, 64, 128, 256), "layers_per_block": 1, "downsample_each_block": True, "sample_size": 6_5536, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "use_timestep_embedding": True, "flip_sin_to_cos": False, "freq_shift": 1, "norm_num_groups": 8, "act_fn": "mish", } A =torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" ) A =model A =UNetaDModel(**a_ ) print(f'''length of state dict: {len(state_dict.keys() )}''' ) print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) A =dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): A =state_dict.pop(a_ ) hf_value_function.load_state_dict(a_ ) torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" ) with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f: json.dump(a_ , a_ ) if __name__ == "__main__": unet(3_2) # unet(128) value_function()
689
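The conversion above pairs the two state dicts purely by iteration order, via dict(zip(...)), which is only safe when both models enumerate parameters in the same order; that is why the block layouts must match exactly. The trick in isolation, with hypothetical keys:

src = {"blocks.0.weight": 1, "blocks.1.weight": 2}  # hypothetical source keys
dst_keys = ["down.0.weight", "down.1.weight"]

mapping = dict(zip(src.keys(), dst_keys))
converted = {mapping[k]: src[k] for k in src}
print(converted)  # {'down.0.weight': 1, 'down.1.weight': 2}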
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative (in place)
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
689
1
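The per-pixel Python loop above is slow for large images; for uint8 inputs the same negative can be computed in one vectorized NumPy step. A sketch:

import numpy as np


def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    # One vectorized subtraction instead of a nested per-pixel loop.
    return 255 - img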
def check_bouncy(n: int) -> bool:
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
689
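check_bouncy encodes the Project Euler 112 definition: a number is bouncy when its digits are neither monotonically increasing nor decreasing. The examples from the problem statement:

assert not check_bouncy(134468)  # increasing digits
assert not check_bouncy(66420)   # decreasing digits
assert check_bouncy(155349)      # neither: bouncy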
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", } __a = { """vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""}, """merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""}, } __a = { """ctrl""": 2_5_6, } __a = { """Pregnancy""": 1_6_8_6_2_9, """Christianity""": 7_6_7_5, """Explain""": 1_0_6_4_2_3, """Fitness""": 6_3_4_4_0, """Saving""": 6_3_1_6_3, """Ask""": 2_7_1_7_1, """Ass""": 9_5_9_8_5, """Joke""": 1_6_3_5_0_9, """Questions""": 4_5_6_2_2, """Thoughts""": 4_9_6_0_5, """Retail""": 5_2_3_4_2, """Feminism""": 1_6_4_3_3_8, """Writing""": 1_1_9_9_2, """Atheism""": 1_9_2_2_6_3, """Netflix""": 4_8_6_1_6, """Computing""": 3_9_6_3_9, """Opinion""": 4_3_2_1_3, """Alone""": 4_4_9_6_7, """Funny""": 5_8_9_1_7, """Gaming""": 4_0_3_5_8, """Human""": 4_0_8_8, """India""": 1_3_3_1, """Joker""": 7_7_1_3_8, """Diet""": 3_6_2_0_6, """Legal""": 1_1_8_5_9, """Norman""": 4_9_3_9, """Tip""": 7_2_6_8_9, """Weight""": 5_2_3_4_3, """Movies""": 4_6_2_7_3, """Running""": 2_3_4_2_5, """Science""": 2_0_9_0, """Horror""": 3_7_7_9_3, """Confession""": 6_0_5_7_2, """Finance""": 1_2_2_5_0, """Politics""": 1_6_3_6_0, """Scary""": 1_9_1_9_8_5, """Support""": 1_2_6_5_4, """Technologies""": 3_2_5_1_6, """Teenage""": 6_6_1_6_0, """Event""": 3_2_7_6_9, """Learned""": 6_7_4_6_0, """Notion""": 1_8_2_7_7_0, """Wikipedia""": 3_7_5_8_3, """Books""": 6_6_6_5, """Extract""": 7_6_0_5_0, """Confessions""": 1_0_2_7_0_1, """Conspiracy""": 7_5_9_3_2, """Links""": 6_3_6_7_4, """Narcissus""": 1_5_0_4_2_5, """Relationship""": 5_4_7_6_6, """Relationships""": 1_3_4_7_9_6, """Reviews""": 4_1_6_7_1, """News""": 4_2_5_6, """Translation""": 2_6_8_2_0, """multilingual""": 1_2_8_4_0_6, } def UpperCamelCase_ ( a_ ) ->List[str]: A =set() A =word[0] for char in word[1:]: pairs.add((prev_char, char) ) A =char A =set(a_ ) return pairs class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = CONTROL_CODES def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ): """simple docstring""" super().__init__(unk_token=snake_case__ , **snake_case__ ) with open(snake_case__ , encoding="utf-8" ) as vocab_handle: A =json.load(snake_case__ ) A ={v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: A =merges_handle.read().split("\n" )[1:-1] A =[tuple(merge.split() ) for merge in merges] A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) A ={} @property def _a ( self : str ): """simple docstring""" return len(self.encoder ) def _a ( self : List[Any] ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : int , snake_case__ : Any ): """simple docstring""" if token in self.cache: return self.cache[token] A =tuple(snake_case__ ) A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) A =get_pairs(snake_case__ ) if not pairs: return token while True: A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break A , A =bigram A =[] A =0 while i < len(snake_case__ ): try: 
A =word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A =j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A =tuple(snake_case__ ) A =new_word if len(snake_case__ ) == 1: break else: A =get_pairs(snake_case__ ) A ="@@ ".join(snake_case__ ) A =word[:-4] A =word return word def _a ( self : List[str] , snake_case__ : int ): """simple docstring""" A =[] A =re.findall(R"\S+\n?" , snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def _a ( self : List[str] , snake_case__ : Optional[int] ): """simple docstring""" return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def _a ( self : Union[str, Any] , snake_case__ : str ): """simple docstring""" return self.decoder.get(snake_case__ , self.unk_token ) def _a ( self : Optional[int] , snake_case__ : Any ): """simple docstring""" A =" ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(snake_case__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(snake_case__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" ) A =0 with open(snake_case__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) A =token_index writer.write(" ".join(snake_case__ ) + "\n" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
689
1
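The tokenizer above performs classic BPE: get_pairs enumerates adjacent symbol pairs and the merge loop repeatedly fuses the pair with the lowest merge rank. What get_pairs yields for the word "adapt" (end-of-word marker attached to the last symbol):

word = ("a", "d", "a", "p", "t</w>")
pairs = set()
prev_char = word[0]
for char in word[1:]:
    pairs.add((prev_char, char))
    prev_char = char
print(sorted(pairs))
# [('a', 'd'), ('a', 'p'), ('d', 'a'), ('p', 't</w>')]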
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated __a = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ __a = """https://storage.googleapis.com/cvdf-datasets/mnist/""" def UpperCamelCase_ ( a_ ) ->Optional[int]: A =numpy.dtype(numpy.uintaa ).newbyteorder(">" ) return numpy.frombuffer(bytestream.read(4 ) , dtype=a_ )[0] @deprecated(a_ , "Please use tf.data to implement this functionality." ) def UpperCamelCase_ ( a_ ) ->List[str]: print("Extracting" , f.name ) with gzip.GzipFile(fileobj=a_ ) as bytestream: A =_readaa(a_ ) if magic != 2051: raise ValueError( "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) ) A =_readaa(a_ ) A =_readaa(a_ ) A =_readaa(a_ ) A =bytestream.read(rows * cols * num_images ) A =numpy.frombuffer(a_ , dtype=numpy.uinta ) A =data.reshape(a_ , a_ , a_ , 1 ) return data @deprecated(a_ , "Please use tf.one_hot on tensors." ) def UpperCamelCase_ ( a_ , a_ ) ->List[Any]: A =labels_dense.shape[0] A =numpy.arange(a_ ) * num_classes A =numpy.zeros((num_labels, num_classes) ) A =1 return labels_one_hot @deprecated(a_ , "Please use tf.data to implement this functionality." ) def UpperCamelCase_ ( a_ , a_=False , a_=10 ) ->Optional[Any]: print("Extracting" , f.name ) with gzip.GzipFile(fileobj=a_ ) as bytestream: A =_readaa(a_ ) if magic != 2049: raise ValueError( "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) ) A =_readaa(a_ ) A =bytestream.read(a_ ) A =numpy.frombuffer(a_ , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(a_ , a_ ) return labels class UpperCamelCase__: """simple docstring""" @deprecated( snake_case__ , "Please use alternatives such as official/mnist/_DataSet.py" " from tensorflow/models." , ) def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : str=False , snake_case__ : str=False , snake_case__ : Any=dtypes.floataa , snake_case__ : Dict=True , snake_case__ : List[Any]=None , ): """simple docstring""" A , A =random_seed.get_seed(snake_case__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) A =dtypes.as_dtype(snake_case__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype ) if fake_data: A =1_00_00 A =one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' A =images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 A =images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
A =images.astype(numpy.floataa ) A =numpy.multiply(snake_case__ , 1.0 / 255.0 ) A =images A =labels A =0 A =0 @property def _a ( self : Optional[Any] ): """simple docstring""" return self._images @property def _a ( self : Any ): """simple docstring""" return self._labels @property def _a ( self : Tuple ): """simple docstring""" return self._num_examples @property def _a ( self : Dict ): """simple docstring""" return self._epochs_completed def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple=False , snake_case__ : str=True ): """simple docstring""" if fake_data: A =[1] * 7_84 A =[1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(snake_case__ )], [fake_label for _ in range(snake_case__ )], ) A =self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: A =numpy.arange(self._num_examples ) numpy.random.shuffle(snake_case__ ) A =self.images[perma] A =self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch A =self._num_examples - start A =self._images[start : self._num_examples] A =self._labels[start : self._num_examples] # Shuffle the data if shuffle: A =numpy.arange(self._num_examples ) numpy.random.shuffle(snake_case__ ) A =self.images[perm] A =self.labels[perm] # Start next epoch A =0 A =batch_size - rest_num_examples A =self._index_in_epoch A =self._images[start:end] A =self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size A =self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(a_ , "Please write your own downloading logic." ) def UpperCamelCase_ ( a_ , a_ , a_ ) ->Dict: if not gfile.Exists(a_ ): gfile.MakeDirs(a_ ) A =os.path.join(a_ , a_ ) if not gfile.Exists(a_ ): urllib.request.urlretrieve(a_ , a_ ) # noqa: S310 with gfile.GFile(a_ ) as f: A =f.size() print("Successfully downloaded" , a_ , a_ , "bytes." ) return filepath @deprecated( a_ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" ) def UpperCamelCase_ ( a_ , a_=False , a_=False , a_=dtypes.floataa , a_=True , a_=5000 , a_=None , a_=DEFAULT_SOURCE_URL , ) ->List[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=a_ , one_hot=a_ , dtype=a_ , seed=a_ ) A =fake() A =fake() A =fake() return _Datasets(train=a_ , validation=a_ , test=a_ ) if not source_url: # empty string check A =DEFAULT_SOURCE_URL A ="train-images-idx3-ubyte.gz" A ="train-labels-idx1-ubyte.gz" A ="t10k-images-idx3-ubyte.gz" A ="t10k-labels-idx1-ubyte.gz" A =_maybe_download( a_ , a_ , source_url + train_images_file ) with gfile.Open(a_ , "rb" ) as f: A =_extract_images(a_ ) A =_maybe_download( a_ , a_ , source_url + train_labels_file ) with gfile.Open(a_ , "rb" ) as f: A =_extract_labels(a_ , one_hot=a_ ) A =_maybe_download( a_ , a_ , source_url + test_images_file ) with gfile.Open(a_ , "rb" ) as f: A =_extract_images(a_ ) A =_maybe_download( a_ , a_ , source_url + test_labels_file ) with gfile.Open(a_ , "rb" ) as f: A =_extract_labels(a_ , one_hot=a_ ) if not 0 <= validation_size <= len(a_ ): A =( "Validation size should be between 0 and " f'''{len(a_ )}. 
Received: {validation_size}.''' ) raise ValueError(a_ ) A =train_images[:validation_size] A =train_labels[:validation_size] A =train_images[validation_size:] A =train_labels[validation_size:] A ={"dtype": dtype, "reshape": reshape, "seed": seed} A =_DataSet(a_ , a_ , **a_ ) A =_DataSet(a_ , a_ , **a_ ) A =_DataSet(a_ , a_ , **a_ ) return _Datasets(train=a_ , validation=a_ , test=a_ )
689
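The one-hot encoder above uses a flat-index trick rather than a Python loop. The same idea as a standalone sketch:

import numpy as np


def dense_to_one_hot(labels_dense: np.ndarray, num_classes: int) -> np.ndarray:
    # Offset row i by i * num_classes, then set exactly one entry per row.
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


print(dense_to_one_hot(np.array([2, 0]), 3))
# [[0. 0. 1.]
#  [1. 0. 0.]]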
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
689
1
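find_minimum_change is greedy, which is only optimal for canonical coin systems such as the Indian denominations used here. Two illustrative calls:

print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987"))
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]

# Counterexample for a non-canonical system: greedy returns three coins
# (4 + 1 + 1) although 3 + 3 makes 6 with two.
print(find_minimum_change([1, 3, 4], "6"))  # [4, 1, 1]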
import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = CTRLTokenizer _A = False _A = False def _a ( self : Optional[int] ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A =["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"] A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) A =["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""] A ={"unk_token": "<unk>"} A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def _a ( self : Tuple , **snake_case__ : Optional[int] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _a ( self : List[Any] , snake_case__ : List[Any] ): """simple docstring""" A ="adapt react readapt apt" A ="adapt react readapt apt" return input_text, output_text def _a ( self : List[Any] ): """simple docstring""" A =CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A ="adapt react readapt apt" A ="adapt re@@ a@@ c@@ t re@@ adapt apt".split() A =tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) A =tokens + [tokenizer.unk_token] A =[0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
689
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = ["""model.decoder.embed_positions.weights"""] def UpperCamelCase_ ( a_ ) ->List[str]: if "emb" in name: A =name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: A =name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: A =name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: A =name.replace("linear1" , "fc1" ) if "linear2" in name: A =name.replace("linear2" , "fc2" ) if "norm1" in name: A =name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: A =name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: A =name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: A =name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: A =name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]: A =list(state_dict.keys() ) A ={} for key in keys: A =state_dict.pop(a_ ) A =rename_keys(a_ ) if "in_proj_weight" in key: # split fused qkv proj A =val[:hidden_size, :] A =val[hidden_size : 2 * hidden_size, :] A =val[-hidden_size:, :] elif "enc_to_dec_proj" in key: A =val else: A =val return state_dict, enc_dec_proj_state_dict def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig: if checkpoint == "small": # default config values A =1024 A =24 A =16 elif checkpoint == "medium": A =1536 A =48 A =24 elif checkpoint == "large": A =2048 A =48 A =32 else: raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) A =MusicgenDecoderConfig( hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , ) return config @torch.no_grad() def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]: A =MusicGen.get_pretrained(a_ , device=a_ ) A =decoder_config_from_checkpoint(a_ ) A =fairseq_model.lm.state_dict() A , A =rename_state_dict( a_ , hidden_size=decoder_config.hidden_size ) A =TaEncoderModel.from_pretrained("t5-base" ) A =EncodecModel.from_pretrained("facebook/encodec_32khz" ) A =MusicgenForCausalLM(a_ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection A , A =decoder.load_state_dict(a_ , strict=a_ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(a_ ) if len(a_ ) > 0: raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' ) if len(a_ ) > 0: raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(a_ ) # check we can do a forward pass A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) A 
=input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): A =model(input_ids=a_ , decoder_input_ids=a_ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor A =AutoTokenizer.from_pretrained("t5-base" ) A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ ) # set the appropriate bos/pad token ids A =2048 A =2048 # set other default generation config params A =int(30 * audio_encoder.config.frame_rate ) A =True A =3.0 if pytorch_dump_folder is not None: Path(a_ ).mkdir(exist_ok=a_ ) logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(a_ ) processor.save_pretrained(a_ ) if repo_id: logger.info(f'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(a_ ) processor.push_to_hub(a_ ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) __a = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
689
1
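rename_state_dict above splits each fused in_proj_weight row-wise into separate q/k/v projections. The slicing in isolation:

import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

# Row-wise split of the fused attention projection.
q_proj = in_proj_weight[:hidden_size, :]
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_proj = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q_proj, k_proj, v_proj]), in_proj_weight)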
from ..utils import DummyObject, requires_backends


class UpperCamelCase__(metaclass=DummyObject):
    """simple docstring"""

    _A = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
689
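Dummy objects like the one above turn a missing backend into a clear error at use time rather than at import time. A simplified sketch of the metaclass trick (not the real DummyObject, which routes through requires_backends):

class DummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the `onnx` backend to be installed.")


class SomeOnnxClass(metaclass=DummyMeta):
    _backends = ["onnx"]


try:
    SomeOnnxClass.from_pretrained  # any missing attribute access fails loudly
except ImportError as err:
    print(err)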
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock_timeout(tmpdir):
    lock_1 = FileLock(str(tmpdir / "foo.lock"))
    lock_2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_2.acquire(timeout)
        assert time.time() - _start > timeout


def test_filelock_long_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock_1 = FileLock(str(tmpdir / filename))
    assert lock_1._lock_file.endswith(".lock")
    assert not lock_1._lock_file.endswith(filename)
    assert len(os.path.basename(lock_1._lock_file)) <= 255
    lock_2 = FileLock(tmpdir / filename)
    with lock_1.acquire():
        with pytest.raises(Timeout):
            lock_2.acquire(0)
689
1
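A minimal usage sketch of the FileLock API these tests exercise, assuming a writable lock path:

from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("/tmp/demo.lock")  # hypothetical lock path
try:
    with lock.acquire(timeout=0.01):
        pass  # critical section; a second holder would make this time out
except Timeout:
    print("another process holds the lock")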
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 __a = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") __a = get_tests_dir("""fixtures/vocab.json""") __a = get_tests_dir("""fixtures""") class UpperCamelCase__( unittest.TestCase ): """simple docstring""" _A = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] def _a ( self : Optional[int] ): """simple docstring""" A =0 def _a ( self : Optional[int] ): """simple docstring""" A =AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(snake_case__ , snake_case__ ) def _a ( self : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A =WavaVecaConfig() A =AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" ) # save in new folder model_config.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) A =AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _a ( self : List[str] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) copyfile(snake_case__ , os.path.join(snake_case__ , "vocab.json" ) ) A =AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _a ( self : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A =WavaVecaFeatureExtractor() A =AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" ) A =WavaVecaProcessor(snake_case__ , snake_case__ ) # save in new folder processor.save_pretrained(snake_case__ ) # drop `processor_class` in tokenizer with open(os.path.join(snake_case__ , snake_case__ ) , "r" ) as f: A =json.load(snake_case__ ) config_dict.pop("processor_class" ) with open(os.path.join(snake_case__ , snake_case__ ) , "w" ) as f: f.write(json.dumps(snake_case__ ) ) A =AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _a ( self : List[str] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A =WavaVecaFeatureExtractor() A =AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" ) A =WavaVecaProcessor(snake_case__ , snake_case__ ) # save in new folder processor.save_pretrained(snake_case__ ) # drop `processor_class` in feature extractor with open(os.path.join(snake_case__ , snake_case__ ) , "r" ) as f: A =json.load(snake_case__ ) 
        config_dict.pop("processor_class" )

        with open(os.path.join(snake_case__ , snake_case__ ) , "w" ) as f:
            f.write(json.dumps(snake_case__ ) )

        A =AutoProcessor.from_pretrained(snake_case__ )
        self.assertIsInstance(snake_case__ , snake_case__ )

    def _a ( self : List[str] ):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            A =WavaVecaConfig(processor_class="Wav2Vec2Processor" )
            model_config.save_pretrained(snake_case__ )
            # copy relevant files
            copyfile(snake_case__ , os.path.join(snake_case__ , "vocab.json" ) )
            # create empty sample processor
            with open(os.path.join(snake_case__ , snake_case__ ) , "w" ) as f:
                f.write("{}" )

            A =AutoProcessor.from_pretrained(snake_case__ )
            self.assertIsInstance(snake_case__ , snake_case__ )

    def _a ( self : Dict ):
        """simple docstring"""
        with self.assertRaises(snake_case__ ):
            A =AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(snake_case__ ):
            A =AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=snake_case__ )

        A =AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=snake_case__ )
        self.assertTrue(processor.special_attribute_present )
        self.assertEqual(processor.__class__.__name__ , "NewProcessor" )

        A =processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present )
        self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )

        A =processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )

            # Test we can also load the slow version
            A =AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
            A =new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present )
            self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )

    def _a ( self : Tuple ):
        """simple docstring"""
        try:
            AutoConfig.register("custom" , snake_case__ )
            AutoFeatureExtractor.register(snake_case__ , snake_case__ )
            AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
            AutoProcessor.register(snake_case__ , snake_case__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(snake_case__ ):
                AutoProcessor.register(snake_case__ , snake_case__ )

            # Now that the config is registered, it can be used as any other config with the auto-API
            A =CustomFeatureExtractor.from_pretrained(snake_case__ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                A =os.path.join(snake_case__ , "vocab.txt" )
                with open(snake_case__ , "w" , encoding="utf-8" ) as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
                A =CustomTokenizer(snake_case__ )

            A =CustomProcessor(snake_case__ , snake_case__ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(snake_case__ )
                A =AutoProcessor.from_pretrained(snake_case__ )
                self.assertIsInstance(snake_case__ , snake_case__ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def _a ( self : int ):
        """simple docstring"""

        class UpperCamelCase__( lowerCAmelCase__ ):
            """simple docstring"""
            _A = False

        class UpperCamelCase__( lowerCAmelCase__ ):
            """simple docstring"""
            _A = False

        class UpperCamelCase__( lowerCAmelCase__ ):
            """simple docstring"""
            _A = "AutoFeatureExtractor"
            _A = "AutoTokenizer"
            _A = False

        try:
            AutoConfig.register("custom" , snake_case__ )
            AutoFeatureExtractor.register(snake_case__ , snake_case__ )
            AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
            AutoProcessor.register(snake_case__ , snake_case__ )
            # If remote code is not set, the default is to use local classes.
            A =AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
            self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )

            # If remote code is disabled, we load the local ones.
            A =AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=snake_case__ )
            self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )

            # If remote is enabled, we load from the Hub.
            A =AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=snake_case__ )
            self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
            self.assertTrue(processor.special_attribute_present )
            self.assertTrue(processor.feature_extractor.special_attribute_present )
            self.assertTrue(processor.tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A =AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
        self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )

    def _a ( self : Dict ):
        """simple docstring"""
        A =AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
        self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )


@is_staging_test
class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""
    _A = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def _a ( cls : Optional[Any] ):
        """simple docstring"""
        A =TOKEN
        HfFolder.save_token(snake_case__ )

    @classmethod
    def _a ( cls : Dict ):
        """simple docstring"""
        try:
            delete_repo(token=cls._token , repo_id="test-processor" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
        except HTTPError:
            pass

    def _a ( self : Optional[int] ):
        """simple docstring"""
        A =WavaVecaProcessor.from_pretrained(snake_case__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(snake_case__ , "test-processor" ) , push_to_hub=snake_case__ , use_auth_token=self._token )

            A =WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A =WavaVecaProcessor.from_pretrained(snake_case__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(snake_case__ , "test-processor-org" ) ,
                push_to_hub=snake_case__ ,
                use_auth_token=self._token ,
                organization="valid_org" ,
            )

            A =WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )

    def _a ( self : Optional[int] ):
        """simple docstring"""
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        A =CustomFeatureExtractor.from_pretrained(snake_case__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            A =os.path.join(snake_case__ , "vocab.txt" )
            with open(snake_case__ , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            A =CustomTokenizer(snake_case__ )

        A =CustomProcessor(snake_case__ , snake_case__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
            A =Repository(snake_case__ , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
            processor.save_pretrained(snake_case__ )

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map ,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                } , )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(snake_case__ , "tokenizer_config.json" ) ) as f:
                A =json.load(snake_case__ )
            self.assertDictEqual(
                tokenizer_config["auto_map"] ,
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                } , )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(snake_case__ , "custom_feature_extraction.py" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(snake_case__ , "custom_tokenization.py" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(snake_case__ , "custom_processing.py" ) ) )

            repo.push_to_hub()

        A =AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=snake_case__ )
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
689
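# A minimal sketch of the registration flow the tests above exercise, using the real
# AutoConfig/AutoProcessor.register API; the Custom* classes here are hypothetical
# stand-ins, not the fixtures from the test suite.
from transformers import AutoConfig, AutoProcessor, PretrainedConfig, ProcessorMixin


class CustomConfig(PretrainedConfig):
    model_type = "custom"  # hypothetical model type used as the registry key


class CustomProcessor(ProcessorMixin):
    attributes = []  # a real processor would declare its tokenizer/feature-extractor attributes


# Once the pair is registered, AutoProcessor can resolve "custom" checkpoints to CustomProcessor.
AutoConfig.register("custom", CustomConfig)
AutoProcessor.register(CustomConfig, CustomProcessor)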
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


__a = {
    """configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
    """tokenization_roformer""": ["""RoFormerTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = ["""RoFormerTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        """ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """RoFormerForCausalLM""",
        """RoFormerForMaskedLM""",
        """RoFormerForMultipleChoice""",
        """RoFormerForQuestionAnswering""",
        """RoFormerForSequenceClassification""",
        """RoFormerForTokenClassification""",
        """RoFormerLayer""",
        """RoFormerModel""",
        """RoFormerPreTrainedModel""",
        """load_tf_weights_in_roformer""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        """TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFRoFormerForCausalLM""",
        """TFRoFormerForMaskedLM""",
        """TFRoFormerForMultipleChoice""",
        """TFRoFormerForQuestionAnswering""",
        """TFRoFormerForSequenceClassification""",
        """TFRoFormerForTokenClassification""",
        """TFRoFormerLayer""",
        """TFRoFormerModel""",
        """TFRoFormerPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        """FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FlaxRoFormerForMaskedLM""",
        """FlaxRoFormerForMultipleChoice""",
        """FlaxRoFormerForQuestionAnswering""",
        """FlaxRoFormerForSequenceClassification""",
        """FlaxRoFormerForTokenClassification""",
        """FlaxRoFormerModel""",
        """FlaxRoFormerPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
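# The init file above defers heavy framework imports until an attribute is first
# accessed. A rough standalone illustration of the same idea (not the actual
# _LazyModule implementation from transformers):
import importlib
import types


class LazyModule(types.ModuleType):
    """Load submodules on first attribute access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(module, attr)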
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__a = """ def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""


class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""

    def _a ( self : int ):
        """simple docstring"""
        A =tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
        A =self.transformer_dir
        shutil.copy(
            os.path.join(snake_case__ , "src/transformers/models/bert/modeling_bert.py" ) ,
            os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )

    def _a ( self : Any ):
        """simple docstring"""
        A ="src/transformers"
        shutil.rmtree(self.transformer_dir )

    def _a ( self : List[Any] , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : Any=None ):
        """simple docstring"""
        A =comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            A =comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        A =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
        A =black.format_str(snake_case__ , mode=snake_case__ )
        A =os.path.join(self.transformer_dir , "new_code.py" )
        with open(snake_case__ , "w" , newline="\n" ) as f:
            f.write(snake_case__ )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
            with open(snake_case__ , "r" ) as f:
                self.assertTrue(f.read() , snake_case__ )

    def _a ( self : List[Any] ):
        """simple docstring"""
        A =check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
        self.assertEqual(snake_case__ , snake_case__ )

    def _a ( self : Tuple ):
        """simple docstring"""
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" ,
            "BertLMPredictionHead" ,
            REFERENCE_CODE + "\n" , )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" ,
            "BertLMPredictionHead" ,
            snake_case__ , )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" ,
            "TestModelLMPredictionHead" ,
            re.sub("Bert" , "TestModel" , snake_case__ ) , )

        # Copy consistency with a really long name
        A ="TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' ,
            f'''{long_class_name}LMPredictionHead''' ,
            re.sub("Bert" , snake_case__ , snake_case__ ) , )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" ,
            "TestModelLMPredictionHead" ,
            snake_case__ ,
            overwrite_result=re.sub("Bert" , "TestModel" , snake_case__ ) , )

    def _a ( self : Tuple ):
        """simple docstring"""
        A =check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        A =(
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        A =(
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        A =(
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )

        A , A =check_copies.convert_to_localized_md(
            snake_case__ , snake_case__ , localized_readme["format_model_list"] )

        self.assertFalse(snake_case__ )
        self.assertEqual(snake_case__ , snake_case__ )

        A , A =check_copies.convert_to_localized_md(
            snake_case__ , snake_case__ , localized_readme["format_model_list"] )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(snake_case__ )

        A =(
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        A =(
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        A =(
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )

        A , A =check_copies.convert_to_localized_md(
            snake_case__ , snake_case__ , localized_readme["format_model_list"] )

        # Check if the model link is synchronized.
        self.assertEqual(snake_case__ , snake_case__ )
689
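# At its core, the check above re-derives the expected copy by applying the
# `with Old->New` rename to the reference source and comparing. A toy version of
# that comparison (much simplified relative to the real check_copies utility):
import re
from typing import Optional


def is_copy_consistent_toy(reference: str, copy: str, replacement: Optional[str] = None) -> bool:
    """Return True if `copy` equals `reference` after the optional `Old->New` rename."""
    expected = reference
    if replacement is not None:
        old, new = replacement.split("->")
        expected = re.sub(old, new, reference)
    return expected.strip() == copy.strip()


assert is_copy_consistent_toy("class BertHead: pass", "class TestModelHead: pass", "Bert->TestModel")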
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF

import datasets


__a = """\
@inproceedings{popovic-2015-chrf,
    title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
    author = \"Popovi{\'c}, Maja\",
    booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
    month = sep,
    year = \"2015\",
    address = \"Lisbon, Portugal\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://aclanthology.org/W15-3049\",
    doi = \"10.18653/v1/W15-3049\",
    pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
    title = \"chr{F}++: words helping character n-grams\",
    author = \"Popovi{\'c}, Maja\",
    booktitle = \"Proceedings of the Second Conference on Machine Translation\",
    month = sep,
    year = \"2017\",
    address = \"Copenhagen, Denmark\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://aclanthology.org/W17-4770\",
    doi = \"10.18653/v1/W17-4770\",
    pages = \"612--618\",
}
@inproceedings{post-2018-call,
    title = \"A Call for Clarity in Reporting {BLEU} Scores\",
    author = \"Post, Matt\",
    booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
    month = oct,
    year = \"2018\",
    address = \"Belgium, Brussels\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://www.aclweb.org/anthology/W18-6319\",
    pages = \"186--191\",
}
"""

__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""

__a = """
Produces ChrF(++) scores for hypotheses given reference translations.

Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equal to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): If `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.

Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equal to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision

Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
        >>> chrf = datasets.load_metric(\"chrf\")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}

    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
        >>> chrf = datasets.load_metric(\"chrf\")
        >>> results = chrf.compute(predictions=prediction,
        ...                        references=reference,
        ...                        word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}

    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
        >>> chrf = datasets.load_metric(\"chrf\")
        >>> results = chrf.compute(predictions=prediction,
        ...                        references=reference,
        ...                        word_order=2,
        ...                        lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
    """simple docstring"""

    def _a ( self : Any ):
        """simple docstring"""
        if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
                } ) ,
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] ,
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ] , )

    def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ):
        """simple docstring"""
        A =len(references[0] )
        if any(len(snake_case__ ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        A =[[refs[i] for refs in references] for i in range(snake_case__ )]
        A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        A =sb_chrf.corpus_score(snake_case__ , snake_case__ )

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
689
1
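# The metric above wraps sacrebleu's CHRF directly, so the same score can be obtained
# without the datasets wrapper. Note that sacrebleu expects one reference *stream* per
# position, i.e. the transpose of the per-prediction reference lists used above.
from sacrebleu import CHRF

hypotheses = ["The cat sat on the mat."]
references = [["The cat sat on a mat."]]  # one inner list per hypothesis

transposed = [[refs[i] for refs in references] for i in range(len(references[0]))]
chrf = CHRF(char_order=6, word_order=2, beta=2)  # word_order=2 -> chrF++
print(chrf.corpus_score(hypotheses, transposed).score)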
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__a = logging.get_logger(__name__)

__a = {
    """facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
    """facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}


class UpperCamelCase__( lowerCAmelCase__ ):
    """simple docstring"""
    _A = "encodec"

    def __init__( self : Optional[int] , snake_case__ : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , snake_case__ : Tuple=2_40_00 , snake_case__ : Union[str, Any]=1 , snake_case__ : Optional[Any]=False , snake_case__ : int=None , snake_case__ : Optional[Any]=None , snake_case__ : Any=1_28 , snake_case__ : Optional[Any]=32 , snake_case__ : Optional[Any]=1 , snake_case__ : List[Any]=[8, 5, 4, 2] , snake_case__ : Tuple="weight_norm" , snake_case__ : Optional[int]=7 , snake_case__ : Optional[Any]=7 , snake_case__ : List[str]=3 , snake_case__ : Optional[Any]=2 , snake_case__ : Dict=True , snake_case__ : Union[str, Any]="reflect" , snake_case__ : Dict=2 , snake_case__ : str=2 , snake_case__ : Any=1.0 , snake_case__ : Optional[Any]=10_24 , snake_case__ : Any=None , snake_case__ : Any=True , **snake_case__ : str , ):
        """simple docstring"""
        A =target_bandwidths
        A =sampling_rate
        A =audio_channels
        A =normalize
        A =chunk_length_s
        A =overlap
        A =hidden_size
        A =num_filters
        A =num_residual_layers
        A =upsampling_ratios
        A =norm_type
        A =kernel_size
        A =last_kernel_size
        A =residual_kernel_size
        A =dilation_growth_rate
        A =use_causal_conv
        A =pad_mode
        A =compress
        A =num_lstm_layers
        A =trim_right_ratio
        A =codebook_size
        A =codebook_dim if codebook_dim is not None else hidden_size
        A =use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )

        super().__init__(**snake_case__ )

    @property
    def _a ( self : Tuple ):
        """simple docstring"""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def _a ( self : str ):
        """simple docstring"""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )

    @property
    def _a ( self : List[Any] ):
        """simple docstring"""
        A =np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )

    @property
    def _a ( self : Dict ):
        """simple docstring"""
        return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
689
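# The derived properties in the config above reduce to a little arithmetic over the
# stored values. For the 24 kHz defaults (chunk_length_s unset, upsampling ratios
# 8*5*4*2), a standalone sketch of the same computation:
import math

import numpy as np

sampling_rate = 24_000
upsampling_ratios = [8, 5, 4, 2]
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

hop_length = int(np.prod(upsampling_ratios))        # 320 samples per frame
frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second
num_quantizers = int(1000 * target_bandwidths[-1] // (frame_rate * 10))  # 32 codebooks
print(hop_length, frame_rate, num_quantizers)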
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


__a = {
    """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTBigCodeForSequenceClassification""",
        """GPTBigCodeForTokenClassification""",
        """GPTBigCodeForCausalLM""",
        """GPTBigCodeModel""",
        """GPTBigCodePreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
from cva import destroyAllWindows, imread, imshow, waitKey


def UpperCamelCase_ ( a_ ) ->Any:
    # getting number of pixels in the image
    A , A =img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(a_ ):
        for j in range(a_ ):
            A =[255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    __a = imread("""image_data/lena.jpg""", 1)

    # convert to its negative
    __a = convert_to_negative(img)

    # show result image
    imshow("""negative of original image""", img)

    waitKey(0)
    destroyAllWindows()
689
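# The per-pixel loop above is O(H*W) in interpreted Python; with NumPy the same
# negative can be computed in one vectorized step (equivalent for 8-bit RGB images).
import numpy as np


def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    # For uint8 images, the negative of each channel value v is 255 - v.
    return 255 - img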
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import (
        TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        FlaubertConfig,
        TFFlaubertForMultipleChoice,
        TFFlaubertForQuestionAnsweringSimple,
        TFFlaubertForSequenceClassification,
        TFFlaubertForTokenClassification,
        TFFlaubertModel,
        TFFlaubertWithLMHeadModel,
    )


class UpperCamelCase__:
    """simple docstring"""

    def __init__( self : List[str] , snake_case__ : Optional[int] , ):
        """simple docstring"""
        A =parent
        A =13
        A =7
        A =True
        A =True
        A =True
        A =True
        A =True
        A =False
        A =False
        A =False
        A =2
        A =99
        A =0
        A =32
        A =2
        A =4
        A =0.1
        A =0.1
        A =5_12
        A =16
        A =2
        A =0.02
        A =3
        A =4
        A ="last"
        A =True
        A =None
        A =0

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )

        A =None
        if self.use_input_lengths:
            A =(
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length

        A =None
        if self.use_token_type_ids:
            A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )

        A =None
        A =None
        A =None
        if self.use_labels:
            A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
            A =ids_tensor([self.batch_size] , self.num_choices )

        A =FlaubertConfig(
            vocab_size=self.vocab_size ,
            n_special=self.n_special ,
            emb_dim=self.hidden_size ,
            n_layers=self.num_hidden_layers ,
            n_heads=self.num_attention_heads ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            gelu_activation=self.gelu_activation ,
            sinusoidal_embeddings=self.sinusoidal_embeddings ,
            asm=self.asm ,
            causal=self.causal ,
            n_langs=self.n_langs ,
            max_position_embeddings=self.max_position_embeddings ,
            initializer_range=self.initializer_range ,
            summary_type=self.summary_type ,
            use_proj=self.use_proj ,
            bos_token_id=self.bos_token_id ,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ):
        """simple docstring"""
        A =TFFlaubertModel(config=snake_case__ )
        A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        A =model(snake_case__ )

        A =[input_ids, input_mask]
        A =model(snake_case__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ):
        """simple docstring"""
        A =TFFlaubertWithLMHeadModel(snake_case__ )
        A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        A =model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , ):
        """simple docstring"""
        A =TFFlaubertForQuestionAnsweringSimple(snake_case__ )
        A ={"input_ids": input_ids, "lengths": input_lengths}
        A =model(snake_case__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , ):
        """simple docstring"""
        A =TFFlaubertForSequenceClassification(snake_case__ )
        A ={"input_ids": input_ids, "lengths": input_lengths}
        A =model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , ):
        """simple docstring"""
        A =self.num_labels
        A =TFFlaubertForTokenClassification(config=snake_case__ )
        A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        A =model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , ):
        """simple docstring"""
        A =self.num_choices
        A =TFFlaubertForMultipleChoice(config=snake_case__ )
        A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
        A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
        A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
        A ={
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        A =model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _a ( self : Any ):
        """simple docstring"""
        A =self.prepare_config_and_inputs()
        (
            ( A ) ,
            ( A ) ,
            ( A ) ,
            ( A ) ,
            ( A ) ,
            ( A ) ,
            ( A ) ,
            ( A ) ,
            ( A ) ,
        ) =config_and_inputs
        A ={
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict


@require_tf
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
    """simple docstring"""
    _A = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    _A = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    _A = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _A = False
    _A = False

    def _a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str ):
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _a ( self : Optional[int] ):
        """simple docstring"""
        A =TFFlaubertModelTester(self )
        A =ConfigTester(self , config_class=snake_case__ , emb_dim=37 )

    def _a ( self : Any ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def _a ( self : str ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*snake_case__ )

    def _a ( self : List[Any] ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )

    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*snake_case__ )

    def _a ( self : List[Any] ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )

    def _a ( self : List[Any] ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ )

    def _a ( self : Tuple ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ )

    @slow
    def _a ( self : Tuple ):
        """simple docstring"""
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A =TFFlaubertModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )


@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""

    @slow
    def _a ( self : Tuple ):
        """simple docstring"""
        A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
        A =tf.convert_to_tensor(
            [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] ,
            dtype=tf.intaa ,
        )  # "J'aime flaubert !"

        A =model(snake_case__ )[0]
        A =tf.TensorShape((1, 8, 5_12) )
        self.assertEqual(output.shape , snake_case__ )
        # compare the actual values for a slice.
        A =tf.convert_to_tensor(
            [
                [
                    [-1.8_768_773, -1.566_555, 0.27_072_418],
                    [-1.6_920_038, -0.5_873_505, 1.9_329_599],
                    [-2.9_563_985, -1.6_993_835, 1.7_972_052],
                ]
            ] ,
            dtype=tf.floataa ,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
689
1
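# The `ids_tensor` helper used throughout the tester above just samples random token
# ids; a minimal TF sketch with the same semantics as the shared test utility (the
# name and signature here mirror, rather than reproduce, that utility):
import tensorflow as tf


def ids_tensor(shape, vocab_size, seed=None):
    """Random int32 tensor of token ids in [0, vocab_size)."""
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32, seed=seed)


input_ids = ids_tensor([13, 7], vocab_size=99)  # batch_size=13, seq_length=7 as in the tester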
def UpperCamelCase_ ( a_ , a_ ) ->int:
    return int((input_a, input_a).count(0 ) == 0 )


def UpperCamelCase_ ( ) ->None:
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
689
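# The same truth-table style extends to other gates; for instance, AND can be
# recovered from NAND alone. This is an illustrative addition (and_gate is
# redefined inline so the snippet runs on its own, mirroring the gate above).
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def nand_gate(input_1: int, input_2: int) -> int:
    return 1 - and_gate(input_1, input_2)


def and_from_nand(input_1: int, input_2: int) -> int:
    # AND(a, b) == NAND(NAND(a, b), NAND(a, b))
    nand = nand_gate(input_1, input_2)
    return nand_gate(nand, nand)


assert all(and_from_nand(a, b) == and_gate(a, b) for a in (0, 1) for b in (0, 1))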
from __future__ import annotations


def UpperCamelCase_ ( a_ ) ->None:
    create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )


def UpperCamelCase_ ( a_ , a_ , a_ , a_ , ) ->None:
    if index == len(a_ ):
        print(a_ )
        return

    for i in range(len(a_ ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            A =True
            create_state_space_tree(a_ , a_ , index + 1 , a_ )
            current_sequence.pop()
            A =False


__a = [3, 1, 2, 4]
generate_all_permutations(sequence)

__a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
689
1
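# The backtracking search above enumerates the same set as itertools.permutations,
# which is a handy cross-check for the hand-rolled state-space tree:
from itertools import permutations


def generate_all_permutations_builtin(sequence):
    # itertools yields tuples; print each as a list to match the output above.
    for perm in permutations(sequence):
        print(list(perm))


generate_all_permutations_builtin([3, 1, 2, 4])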
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


__a = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class UpperCamelCase__( lowerCAmelCase__ ):
    """simple docstring"""
    _A = ["pixel_values"]

    def __init__( self : str , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_55 , snake_case__ : bool = True , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : bool = True , **snake_case__ : Optional[Any] , ):
        """simple docstring"""
        super().__init__(**snake_case__ )
        A =size if size is not None else {"shortest_edge": 2_24}
        A =get_size_dict(snake_case__ , default_to_square=snake_case__ )
        A =crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        A =get_size_dict(snake_case__ , default_to_square=snake_case__ , param_name="crop_size" )

        A =do_resize
        A =size
        A =resample
        A =do_center_crop
        A =crop_size
        A =do_rescale
        A =rescale_factor
        A =do_normalize
        A =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        A =image_std if image_std is not None else OPENAI_CLIP_STD
        A =do_convert_rgb

    def _a ( self : Union[str, Any] , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Dict , ):
        """simple docstring"""
        A =get_size_dict(snake_case__ , default_to_square=snake_case__ )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        A =get_resize_output_image_size(snake_case__ , size=size["shortest_edge"] , default_to_square=snake_case__ )
        return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )

    def _a ( self : Tuple , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[str] , ):
        """simple docstring"""
        A =get_size_dict(snake_case__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(snake_case__ , size=(size["height"], size["width"]) , data_format=snake_case__ , **snake_case__ )

    def _a ( self : Dict , snake_case__ : np.ndarray , snake_case__ : Union[int, float] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Optional[int] , ):
        """simple docstring"""
        return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )

    def _a ( self : Any , snake_case__ : np.ndarray , snake_case__ : Union[float, List[float]] , snake_case__ : Union[float, List[float]] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : str , ):
        """simple docstring"""
        return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )

    def _a ( self : Optional[Any] , snake_case__ : ImageInput , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = None , snake_case__ : bool = None , snake_case__ : int = None , snake_case__ : bool = None , snake_case__ : float = None , snake_case__ : bool = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : bool = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **snake_case__ : List[Any] , ):
        """simple docstring"""
        A =do_resize if do_resize is not None else self.do_resize
        A =size if size is not None else self.size
        A =get_size_dict(snake_case__ , param_name="size" , default_to_square=snake_case__ )
        A =resample if resample is not None else self.resample
        A =do_center_crop if do_center_crop is not None else self.do_center_crop
        A =crop_size if crop_size is not None else self.crop_size
        A =get_size_dict(snake_case__ , param_name="crop_size" , default_to_square=snake_case__ )
        A =do_rescale if do_rescale is not None else self.do_rescale
        A =rescale_factor if rescale_factor is not None else self.rescale_factor
        A =do_normalize if do_normalize is not None else self.do_normalize
        A =image_mean if image_mean is not None else self.image_mean
        A =image_std if image_std is not None else self.image_std
        A =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        A =make_list_of_images(snake_case__ )

        if not valid_images(snake_case__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            A =[convert_to_rgb(snake_case__ ) for image in images]

        # All transformations expect numpy arrays.
        A =[to_numpy_array(snake_case__ ) for image in images]

        if do_resize:
            A =[self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]

        if do_center_crop:
            A =[self.center_crop(image=snake_case__ , size=snake_case__ ) for image in images]

        if do_rescale:
            A =[self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]

        if do_normalize:
            A =[self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]

        A =[to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]

        A ={"pixel_values": images}
        return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
689
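# End to end, the processor above is resize -> center-crop -> rescale -> normalize.
# A bare NumPy sketch of that sequence on one HWC uint8 image, using the OpenAI CLIP
# mean/std constants referenced in the file (resizing itself is omitted; the input is
# assumed to already have its short edge >= the crop size):
import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])


def preprocess(image: np.ndarray, crop: int = 224) -> np.ndarray:
    h, w = image.shape[:2]
    top, left = (h - crop) // 2, (w - crop) // 2
    image = image[top : top + crop, left : left + crop]   # center crop
    image = image.astype(np.float32) / 255.0              # rescale
    image = (image - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD  # normalize per channel
    return image.transpose(2, 0, 1)                       # HWC -> CHW ("channels first")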
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNetaDConditionModel,
    UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _a ( self : Tuple ):
        """simple docstring"""
        torch.manual_seed(0 )
        A =UNetaDModel(
            sample_size=(32, 64) ,
            in_channels=1 ,
            out_channels=1 ,
            layers_per_block=2 ,
            block_out_channels=(1_28, 1_28) ,
            down_block_types=("AttnDownBlock2D", "DownBlock2D") ,
            up_block_types=("UpBlock2D", "AttnUpBlock2D") ,
        )
        return model

    @property
    def _a ( self : Dict ):
        """simple docstring"""
        torch.manual_seed(0 )
        A =UNetaDConditionModel(
            sample_size=(64, 32) ,
            in_channels=1 ,
            out_channels=1 ,
            layers_per_block=2 ,
            block_out_channels=(1_28, 1_28) ,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") ,
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") ,
            cross_attention_dim=10 ,
        )
        return model

    @property
    def _a ( self : Dict ):
        """simple docstring"""
        torch.manual_seed(0 )
        A =AutoencoderKL(
            sample_size=(1_28, 64) ,
            in_channels=1 ,
            out_channels=1 ,
            latent_channels=1 ,
            layers_per_block=2 ,
            block_out_channels=(1_28, 1_28) ,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") ,
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") ,
        )
        A =UNetaDModel(
            sample_size=(64, 32) ,
            in_channels=1 ,
            out_channels=1 ,
            layers_per_block=2 ,
            block_out_channels=(1_28, 1_28) ,
            down_block_types=("AttnDownBlock2D", "DownBlock2D") ,
            up_block_types=("UpBlock2D", "AttnUpBlock2D") ,
        )
        return vqvae, unet

    @slow
    def _a ( self : int ):
        """simple docstring"""
        A ="cpu"  # ensure determinism for the device-dependent torch.Generator
        A =Mel(
            x_res=self.dummy_unet.config.sample_size[1] ,
            y_res=self.dummy_unet.config.sample_size[0] , )
        A =DDPMScheduler()
        A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(generator=snake_case__ , steps=4 )
        A =output.audios[0]
        A =output.images[0]

        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
        A =output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
        A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0

        A =Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] ,
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        A =DDIMScheduler()
        A =self.dummy_vqvae_and_unet
        A =AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        np.random.seed(0 )
        A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
        A =output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0

        A =self.dummy_unet_condition
        A =AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        np.random.seed(0 )
        A =torch.rand((1, 1, 10) )
        A =pipe(generator=snake_case__ , encoding=snake_case__ )
        A =output.images[0]
        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0


@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""

    def _a ( self : Optional[int] ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        A =torch_device
        A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(generator=snake_case__ )
        A =output.audios[0]
        A =output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
689
1
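# The tests above rely on seeded torch.Generator objects so the same latents are drawn
# across runs; the determinism pattern in isolation:
import torch

gen_a = torch.Generator(device="cpu").manual_seed(42)
gen_b = torch.Generator(device="cpu").manual_seed(42)
assert torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))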
import datasets


__a = """\
@InProceedings{conneau2018xnli,
  author = \"Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin\",
  title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
  booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\",
  year = \"2018\",
  publisher = \"Association for Computational Linguistics\",
  location = \"Brussels, Belgium\",
}
"""

__a = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated into
14 different languages (some low-ish resource). As with MNLI, the goal is to predict
textual entailment (does sentence A imply/contradict/neither sentence B) and is a
classification task (given two sentences, predict one of three labels).
"""

__a = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric(\"xnli\")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def UpperCamelCase_ ( a_ , a_ ) ->Optional[int]:
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
    """simple docstring"""

    def _a ( self : Tuple ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
                } ) ,
            codebase_urls=[] ,
            reference_urls=[] ,
            format="numpy" , )

    def _a ( self : Tuple , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
        """simple docstring"""
        return {"accuracy": simple_accuracy(snake_case__ , snake_case__ )}
689
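# The XNLI metric above is plain accuracy over predicted labels; with NumPy arrays the
# whole computation is the mean of an elementwise comparison:
import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
accuracy = float((preds == labels).mean())  # 0.75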
1
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
    """simple docstring"""
    _A = CycleDiffusionPipeline
    _A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    _A = PipelineTesterMixin.required_optional_params - {"latents"}
    _A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
    _A = IMAGE_TO_IMAGE_IMAGE_PARAMS
    _A = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def _a ( self : List[Any] ):
        """simple docstring"""
        torch.manual_seed(0 )
        A =UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,
            cross_attention_dim=32 ,
        )
        A =DDIMScheduler(
            beta_start=0.00_085 ,
            beta_end=0.012 ,
            beta_schedule="scaled_linear" ,
            num_train_timesteps=10_00 ,
            clip_sample=snake_case__ ,
            set_alpha_to_one=snake_case__ ,
        )
        torch.manual_seed(0 )
        A =AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,
            latent_channels=4 ,
        )
        torch.manual_seed(0 )
        A =CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=10_00 ,
        )
        A =CLIPTextModel(snake_case__ )
        A =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        A ={
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def _a ( self : List[Any] , snake_case__ : str , snake_case__ : Dict=0 ):
        """simple docstring"""
        A =floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
        A =image / 2 + 0.5
        if str(snake_case__ ).startswith("mps" ):
            A =torch.manual_seed(snake_case__ )
        else:
            A =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
        A ={
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A ="cpu"  # ensure determinism for the device-dependent torch.Generator
        A =self.get_dummy_components()
        A =CycleDiffusionPipeline(**snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        A =self.get_dummy_inputs(snake_case__ )
        A =pipe(**snake_case__ )
        A =output.images

        A =images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        A =np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _a ( self : int ):
        """simple docstring"""
        A =self.get_dummy_components()
        for name, module in components.items():
            if hasattr(snake_case__ , "half" ):
                A =module.half()
        A =CycleDiffusionPipeline(**snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        A =self.get_dummy_inputs(snake_case__ )
        A =pipe(**snake_case__ )
        A =output.images

        A =images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        A =np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def _a ( self : List[Any] ):
        """simple docstring"""
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline" )
    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        return super().test_inference_batch_single_identical()

    @skip_mps
    def _a ( self : List[str] ):
        """simple docstring"""
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def _a ( self : str ):
        """simple docstring"""
        return super().test_save_load_optional_components()

    @skip_mps
    def _a ( self : List[str] ):
        """simple docstring"""
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""

    def _a ( self : Any ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _a ( self : int ):
        """simple docstring"""
        A =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png" )
        A =load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
        A =init_image.resize((5_12, 5_12) )

        A ="CompVis/stable-diffusion-v1-4"
        A =DDIMScheduler.from_pretrained(snake_case__ , subfolder="scheduler" )
        A =CycleDiffusionPipeline.from_pretrained(
            snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ , torch_dtype=torch.floataa , revision="fp16" )

        pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        pipe.enable_attention_slicing()

        A ="A black colored car"
        A ="A blue colored car"

        A =torch.manual_seed(0 )
        A =pipe(
            prompt=snake_case__ ,
            source_prompt=snake_case__ ,
            image=snake_case__ ,
            num_inference_steps=1_00 ,
            eta=0.1 ,
            strength=0.85 ,
            guidance_scale=3 ,
            source_guidance_scale=1 ,
            generator=snake_case__ ,
            output_type="np" ,
        )
        A =output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1

    def _a ( self : Dict ):
        """simple docstring"""
        A =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png" )
        A =load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
        A =init_image.resize((5_12, 5_12) )

        A ="CompVis/stable-diffusion-v1-4"
        A =DDIMScheduler.from_pretrained(snake_case__ , subfolder="scheduler" )
        A =CycleDiffusionPipeline.from_pretrained(snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ )

        pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        pipe.enable_attention_slicing()

        A ="A black colored car"
        A ="A blue colored car"

        A =torch.manual_seed(0 )
        A =pipe(
            prompt=snake_case__ ,
            source_prompt=snake_case__ ,
            image=snake_case__ ,
            num_inference_steps=1_00 ,
            eta=0.1 ,
            strength=0.85 ,
            guidance_scale=3 ,
            source_guidance_scale=1 ,
            generator=snake_case__ ,
            output_type="np" ,
        )
        A =output.images

        assert np.abs(image - expected_image ).max() < 2E-2
689
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class UpperCamelCase__: """simple docstring""" _A = 42 _A = None _A = None __a = namedtuple("""CoinsDistribResult""", """moves excess""") def UpperCamelCase_ ( a_ ) ->int: if root is None: return 0 # Validation def count_nodes(a_ ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(a_ ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(a_ ) != count_coins(a_ ): raise ValueError("The number of nodes should be the same as the number of coins" ) # Main calculation def get_distrib(a_ ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) A , A =get_distrib(node.left ) A , A =get_distrib(node.right ) A =1 - left_distrib_excess A =1 - right_distrib_excess A =( left_distrib_moves + right_distrib_moves + abs(a_ ) + abs(a_ ) ) A =node.data - coins_to_left - coins_to_right return CoinsDistribResult(a_ , a_ ) return get_distrib(a_ )[0] if __name__ == "__main__": import doctest doctest.testmod()
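For reference, a minimal sketch of the same distribute-coins algorithm with illustrative names (`TreeNode`, `distribute_coins`, and the example tree are assumptions, not taken from the file above): each recursive call reports the moves made inside a subtree together with the subtree's coin excess, and every excess or deficit coin crossing an edge costs one move.

from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


def distribute_coins(root: TreeNode | None) -> int:
    # Minimum number of moves so that every node ends up with exactly one coin.
    # Each call returns (moves made inside the subtree, coins the subtree passes
    # up to its parent; negative means the subtree still needs coins).
    def dfs(node: TreeNode | None) -> tuple[int, int]:
        if node is None:
            return 0, 0
        left_moves, left_excess = dfs(node.left)
        right_moves, right_excess = dfs(node.right)
        # Every coin crossing a child edge, in either direction, is one move.
        moves = left_moves + right_moves + abs(left_excess) + abs(right_excess)
        excess = node.data + left_excess + right_excess - 1
        return moves, excess

    return dfs(root)[0]


# Root holds 3 coins, both leaves hold 0 -> one coin moves to each leaf.
assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2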
689
1
def UpperCamelCase_ ( a_ , a_ ) ->str: if not isinstance(a_ , a_ ): raise ValueError("iterations must be defined as integers" ) if not isinstance(a_ , a_ ) or not number >= 1: raise ValueError("starting number must be an integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) A ="" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(a_ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
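A runnable sketch of the same FizzBuzz string builder, with illustrative parameter names and assuming the trailing-space-per-entry behaviour of the loop above:

def fizz_buzz(number: int, iterations: int) -> str:
    # Multiples of 3 -> "Fizz", of 5 -> "Buzz", of both -> "FizzBuzz",
    # otherwise the number itself; every entry is followed by a space.
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "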
689
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {"""vocab_file""": """vocab.txt"""} __a = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } __a = { """openbmb/cpm-ant-10b""": 1_0_2_4, } def UpperCamelCase_ ( a_ ) ->List[Any]: A =collections.OrderedDict() with open(a_ , "r" , encoding="utf-8" ) as reader: A =reader.readlines() for index, token in enumerate(a_ ): A =token.rstrip("\n" ) A =index return vocab class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ): """simple docstring""" A =vocab A =unk_token A =max_input_chars_per_word def _a ( self : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" A =list(snake_case__ ) if len(snake_case__ ) > self.max_input_chars_per_word: return [self.unk_token] A =0 A =[] while start < len(snake_case__ ): A =len(snake_case__ ) A =None while start < end: A ="".join(chars[start:end] ) if substr in self.vocab: A =substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(snake_case__ ) A =end return sub_tokens class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = ["input_ids", "attention_mask"] _A = False def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ): """simple docstring""" requires_backends(self , ["jieba"] ) super().__init__( bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , ) A =bod_token A =eod_token A =load_vocab(snake_case__ ) A =self.encoder[space_token] A =self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) A ={v: k for k, v in self.encoder.items()} A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _a ( self : Dict ): """simple docstring""" return self.encoder[self.bod_token] @property def _a ( self : List[str] ): """simple docstring""" return self.encoder[self.eod_token] @property def _a ( self : Any ): """simple docstring""" return self.encoder["\n"] @property def _a ( self : List[str] ): """simple docstring""" return len(self.encoder ) def _a ( self : Tuple ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : Tuple , snake_case__ : int ): """simple docstring""" A =[] for x in jieba.cut(snake_case__ , cut_all=snake_case__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) ) return output_tokens def _a ( self : List[Any] , snake_case__ : List[Any] , 
**snake_case__ : str ): """simple docstring""" A =[i for i in token_ids if i >= 0] A =[ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(snake_case__ , **snake_case__ ) def _a ( self : List[Any] , snake_case__ : int ): """simple docstring""" return token in self.encoder def _a ( self : Optional[Any] , snake_case__ : List[str] ): """simple docstring""" return "".join(snake_case__ ) def _a ( self : List[Any] , snake_case__ : Optional[Any] ): """simple docstring""" return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def _a ( self : Dict , snake_case__ : Optional[int] ): """simple docstring""" return self.decoder.get(snake_case__ , self.unk_token ) def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" if os.path.isdir(snake_case__ ): A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: A =(filename_prefix + "-" if filename_prefix else "") + save_directory A =0 if " " in self.encoder: A =self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: A =self.encoder["\n"] del self.encoder["\n"] A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) with open(snake_case__ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) A =token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ): """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is not None: return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) return [1] + ([0] * len(snake_case__ ))
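The inner tokenizer above performs greedy longest-match-first lookup against the vocabulary. Below is a small self-contained sketch of that loop; the function name and the toy vocabularies are illustrative assumptions:

def wordpiece_tokenize(text: str, vocab: set, unk_token: str = "<unk>") -> list:
    # At each position, take the longest substring found in the vocabulary;
    # if nothing matches, emit the unknown token and advance one character.
    chars = list(text)
    start, tokens = 0, []
    while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:
            substr = "".join(chars[start:end])
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            tokens.append(unk_token)
            start += 1
        else:
            tokens.append(cur_substr)
            start = end
    return tokens


assert wordpiece_tokenize("unhappy", {"un", "happy"}) == ["un", "happy"]
assert wordpiece_tokenize("xy", {"y"}) == ["<unk>", "y"]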
689
1
import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__: """simple docstring""" def __init__( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : Tuple=3 , snake_case__ : int=32 , snake_case__ : Optional[Any]=3 , snake_case__ : List[Any]=10 , snake_case__ : Optional[Any]=[10, 20, 30, 40] , snake_case__ : Dict=[1, 1, 2, 1] , snake_case__ : Tuple=True , snake_case__ : int=True , snake_case__ : str="relu" , snake_case__ : List[Any]=3 , snake_case__ : Dict=None , ): """simple docstring""" A =parent A =batch_size A =image_size A =num_channels A =embeddings_size A =hidden_sizes A =depths A =is_training A =use_labels A =hidden_act A =num_labels A =scope A =len(snake_case__ ) def _a ( self : Tuple ): """simple docstring""" A =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A =None if self.use_labels: A =ids_tensor([self.batch_size] , self.num_labels ) A =self.get_config() return config, pixel_values, labels def _a ( self : List[str] ): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _a ( self : str , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] ): """simple docstring""" A =RegNetModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] ): """simple docstring""" A =self.num_labels A =RegNetForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Dict ): """simple docstring""" A =self.prepare_config_and_inputs() A , A , A =config_and_inputs A ={"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () _A = ( {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification} if is_torch_available() else {} ) _A = False _A = False _A = False _A = False def _a ( self : List[str] ): """simple docstring""" A =RegNetModelTester(self ) A =ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" 
self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : int ): """simple docstring""" return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason="RegNet does not support input and output embeddings" ) def _a ( self : List[str] ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A =model_class(snake_case__ ) A =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A =[*signature.parameters.keys()] A =["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def _a ( self : Optional[int] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A =model_class(config=snake_case__ ) for name, module in model.named_modules(): if isinstance(snake_case__ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def _a ( self : Any ): """simple docstring""" def check_hidden_states_output(snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : List[str] ): A =model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): A =model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) A =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A =self.model_tester.num_stages self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A =self.model_tester.prepare_config_and_inputs_for_common() A =["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A =layer_type A =True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A =True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def _a ( self : Union[str, Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def _a ( self : List[Any] ): """simple docstring""" for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A =RegNetModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def UpperCamelCase_ ( ) ->Tuple: A =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCamelCase__( 
unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Any ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Optional[int] ): """simple docstring""" A =RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case__ ) A =self.default_image_processor A =prepare_img() A =image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): A =model(**snake_case__ ) # verify the logits A =torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , snake_case__ ) A =torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
689
def UpperCamelCase_ ( a_ = 6008_5147_5143 ) ->int: try: A =int(a_ ) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int." ) if n <= 0: raise ValueError("Parameter n must be greater than or equal to one." ) A =2 A =0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 A =i while n % i == 0: A =n // i i += 1 return int(a_ ) if __name__ == "__main__": print(F'''{solution() = }''')
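A sketch of the same trial-division search with illustrative names; the default 600851475143 is the Project Euler problem 3 input:

def largest_prime_factor(n: int = 600_851_475_143) -> int:
    # Trial division: strip out each factor i completely before moving on,
    # so the last factor recorded is the largest prime factor.
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    if n == 2:
        return 2
    i = 2
    ans = 0
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n //= i
        i += 1
    return ans


assert largest_prime_factor(13_195) == 29  # 13195 = 5 * 7 * 13 * 29
assert largest_prime_factor(600_851_475_143) == 6_857  # Project Euler #3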
689
1
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class UpperCamelCase__: """simple docstring""" def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Tuple=13 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Union[str, Any]=True , snake_case__ : Dict=False , snake_case__ : Optional[int]=True , snake_case__ : int=99 , snake_case__ : Any=32 , snake_case__ : Optional[int]=5 , snake_case__ : Tuple=4 , snake_case__ : Any=37 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : str=0.1 , snake_case__ : Dict=0.1 , snake_case__ : List[str]=5_12 , snake_case__ : int=16 , snake_case__ : Dict=2 , snake_case__ : Union[str, Any]=0.02 , snake_case__ : Any=3 , snake_case__ : Dict=4 , snake_case__ : Any=None , ): """simple docstring""" A =parent A =batch_size A =seq_length A =is_training A =use_input_mask A =use_token_type_ids A =use_labels A =vocab_size A =hidden_size A =num_hidden_layers A =num_attention_heads A =intermediate_size A =hidden_act A =hidden_dropout_prob A =attention_probs_dropout_prob A =max_position_embeddings A =type_vocab_size A =type_sequence_label_size A =initializer_range A =num_labels A =num_choices A =scope def _a ( self : str ): """simple docstring""" A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A =None if self.use_input_mask: A =random_attention_mask([self.batch_size, self.seq_length] ) A =None if self.use_token_type_ids: A =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A =None A =None A =None if self.use_labels: A =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A =ids_tensor([self.batch_size] , self.num_choices ) A =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : Any ): """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , ) def _a ( self : Any , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" A =OpenLlamaModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , attention_mask=snake_case__ ) A =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Dict , snake_case__ : List[str] , snake_case__ : Union[str, 
Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Dict , ): """simple docstring""" A =True A =OpenLlamaModel(snake_case__ ) model.to(snake_case__ ) model.eval() A =model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , ) A =model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , ) A =model(snake_case__ , attention_mask=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : int , ): """simple docstring""" A =OpenLlamaForCausalLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : int , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , ): """simple docstring""" A =True A =True A =OpenLlamaForCausalLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() # first forward pass A =model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , ) A =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A =ids_tensor((self.batch_size, 3) , config.vocab_size ) A =ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A =torch.cat([input_ids, next_tokens] , dim=-1 ) A =torch.cat([input_mask, next_mask] , dim=-1 ) A =model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0] A =model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0] # select random slice A =ids_tensor((1,) , output_from_past.shape[-1] ).item() A =output_from_no_past[:, -3:, random_slice_idx].detach() A =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) ) def _a ( self : int ): """simple docstring""" A =self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) =config_and_inputs A ={"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) _A = (OpenLlamaForCausalLM,) if is_torch_available() else () _A = ( { "feature-extraction": 
OpenLlamaModel, "text-classification": OpenLlamaForSequenceClassification, "text-generation": OpenLlamaForCausalLM, "zero-shot": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) _A = False _A = False def _a ( self : Union[str, Any] ): """simple docstring""" A =OpenLlamaModelTester(self ) A =ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def _a ( self : Union[str, Any] ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : Tuple ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _a ( self : Union[str, Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A =type self.model_tester.create_and_check_model(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() A =3 A =input_dict["input_ids"] A =input_ids.ne(1 ).to(snake_case__ ) A =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A =OpenLlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : str ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() A =3 A ="single_label_classification" A =input_dict["input_ids"] A =input_ids.ne(1 ).to(snake_case__ ) A =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A =OpenLlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Any ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() A =3 A ="multi_label_classification" A =input_dict["input_ids"] A =input_ids.ne(1 ).to(snake_case__ ) A =ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) A =OpenLlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" ) def _a ( self : Optional[Any] ): """simple docstring""" pass @parameterized.expand([("linear",), ("dynamic",)] ) def _a ( self : Optional[Any] , snake_case__ : Optional[int] ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() A =ids_tensor([1, 10] , config.vocab_size ) A =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A =OpenLlamaModel(snake_case__ ) original_model.to(snake_case__ ) original_model.eval() A =original_model(snake_case__ ).last_hidden_state A =original_model(snake_case__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A ={"type": scaling_type, "factor": 10.0} A =OpenLlamaModel(snake_case__ ) scaled_model.to(snake_case__ ) 
scaled_model.eval() A =scaled_model(snake_case__ ).last_hidden_state A =scaled_model(snake_case__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
689
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = "Wav2Vec2FeatureExtractor" _A = "AutoTokenizer" def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" super().__init__(snake_case__ , snake_case__ ) A =self.feature_extractor A =False @classmethod def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ): """simple docstring""" try: return super().from_pretrained(snake_case__ , **snake_case__ ) except OSError: warnings.warn( f'''Loading a tokenizer inside {cls.__name__} from a config that does not''' " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , snake_case__ , ) A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ ) A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ ) return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ ) def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*snake_case__ , **snake_case__ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) A =kwargs.pop("raw_speech" ) else: A =kwargs.pop("audio" , snake_case__ ) A =kwargs.pop("sampling_rate" , snake_case__ ) A =kwargs.pop("text" , snake_case__ ) if len(snake_case__ ) > 0: A =args[0] A =args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ ) if text is not None: A =self.tokenizer(snake_case__ , **snake_case__ ) if text is None: return inputs elif audio is None: return encodings else: A =encodings["input_ids"] return inputs def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*snake_case__ , **snake_case__ ) A =kwargs.pop("input_features" , snake_case__ ) A =kwargs.pop("labels" , snake_case__ ) if len(snake_case__ ) > 0: A =args[0] A =args[1:] if input_features is not None: A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ ) if labels is not None: A =self.tokenizer.pad(snake_case__ , **snake_case__ ) if labels is None: return input_features elif input_features is None: return labels else: A =labels["input_ids"] return input_features def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ): """simple docstring""" return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ): """simple docstring""" return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @contextmanager def _a ( self : int ): """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call.)" ) A =True A =self.tokenizer yield A =self.feature_extractor A =False
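A minimal usage sketch, assuming the upstream `Wav2Vec2Processor` API; the checkpoint name, audio length, and transcript here are illustrative:

import numpy as np

from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

# One second of silent 16 kHz mono audio plus its transcript; passing both in a
# single call returns the audio features with the tokenized text under `labels`.
audio = np.zeros(16_000, dtype=np.float32)
inputs = processor(audio=audio, sampling_rate=16_000, text="HELLO WORLD", return_tensors="np")
print(inputs["input_values"].shape)  # expected roughly (1, 16000)
print(inputs["labels"])              # token ids for "HELLO WORLD"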
689
1
import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": __a = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """))) print("""Googling.....""") __a = F'''https://www.google.com/search?q={query}&num=100''' __a = requests.get( url, headers={"""User-Agent""": str(UserAgent().random)}, ) try: __a = ( BeautifulSoup(res.text, """html.parser""") .find("""div""", attrs={"""class""": """yuRUbf"""}) .find("""a""") .get("""href""") ) except AttributeError: __a = parse_qs( BeautifulSoup(res.text, """html.parser""") .find("""div""", attrs={"""class""": """kCrYT"""}) .find("""a""") .get("""href""") )["""url"""][0] webbrowser.open(link)
689
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
689
1
def UpperCamelCase_ ( a_ ) ->tuple[int, int]: try: A =float(a_ ) except ValueError: raise ValueError("Please enter a valid number" ) A =decimal - int(a_ ) if fractional_part == 0: return int(a_ ), 1 else: A =len(str(a_ ).split("." )[1] ) A =int(decimal * (10**number_of_frac_digits) ) A =10**number_of_frac_digits A , A =denominator, numerator while True: A =dividend % divisor if remainder == 0: break A , A =divisor, remainder A , A =numerator / divisor, denominator / divisor return int(a_ ), int(a_ ) if __name__ == "__main__": print(F'''{decimal_to_fraction(2) = }''') print(F'''{decimal_to_fraction(89.0) = }''') print(F'''{decimal_to_fraction('67') = }''') print(F'''{decimal_to_fraction('45.0') = }''') print(F'''{decimal_to_fraction(1.5) = }''') print(F'''{decimal_to_fraction('6.25') = }''') print(F'''{decimal_to_fraction('78td') = }''')
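The reduction loop above is Euclid's algorithm; an equivalent sketch using `math.gcd` to reduce the scaled fraction (the standalone function here is an illustrative re-statement):

from math import gcd


def decimal_to_fraction(decimal) -> tuple:
    # Scale the fractional digits up to an integer numerator over a power of
    # ten, then reduce with the GCD -- the same result as the Euclid loop above.
    value = float(decimal)
    if value == int(value):
        return int(value), 1
    digits = len(str(value).split(".")[1])
    numerator = int(value * 10**digits)
    denominator = 10**digits
    common = gcd(numerator, denominator)
    return numerator // common, denominator // common


assert decimal_to_fraction(1.5) == (3, 2)
assert decimal_to_fraction("6.25") == (25, 4)
assert decimal_to_fraction(89.0) == (89, 1)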
689
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __a = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""") __a = ( subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split() ) __a = """|""".join(sys.argv[1:]) __a = re.compile(rF'''^({joined_dirs}).*?\.py$''') __a = [x for x in modified_files if regex.match(x)] print(""" """.join(relevant_modified_files), end="""""")
689
1
import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = VideoToVideoSDPipeline _A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} _A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} _A = PipelineTesterMixin.required_optional_params - {"latents"} _A = False # No `output_type`. _A = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def _a ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) A =UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) A =DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , ) torch.manual_seed(0 ) A =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) A =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) A =CLIPTextModel(snake_case__ ) A =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A ={ "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def _a ( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : Tuple=0 ): """simple docstring""" A =floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) if str(snake_case__ ).startswith("mps" ): A =torch.manual_seed(snake_case__ ) else: A =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) A ={ "prompt": "A painting of a squirrel eating a burger", "video": video, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def _a ( self : Optional[Any] ): """simple docstring""" A ="cpu" # ensure determinism for the device-dependent torch.Generator A =self.get_dummy_components() A =VideoToVideoSDPipeline(**snake_case__ ) A =sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) A =self.get_dummy_inputs(snake_case__ ) A ="np" A =sd_pipe(**snake_case__ ).frames A =frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) A =np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] ) assert np.abs(image_slice.flatten() - expected_slice 
).max() < 1E-2 @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _a ( self : Optional[Any] ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ , expected_max_diff=5E-3 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def _a ( self : Any ): """simple docstring""" pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def _a ( self : Any ): """simple docstring""" pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : Dict ): """simple docstring""" return super().test_progress_bar() @slow @skip_mps class UpperCamelCase__( unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A =VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames A =torch.Generator(device="cpu" ).manual_seed(0 ) A =torch.randn((1, 10, 3, 10_24, 5_76) , generator=snake_case__ ) A =video.to("cuda" ) A ="Spiderman is surfing" A =pipe(snake_case__ , video=snake_case__ , generator=snake_case__ , num_inference_steps=3 , output_type="pt" ).frames A =np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
689
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = { """configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""MobileViTFeatureExtractor"""] __a = ["""MobileViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MobileViTForImageClassification""", """MobileViTForSemanticSegmentation""", """MobileViTModel""", """MobileViTPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFMobileViTForImageClassification""", """TFMobileViTForSemanticSegmentation""", """TFMobileViTModel""", """TFMobileViTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
def UpperCamelCase_ ( a_ ) ->List[Any]: A =[] A =set({"(", "[", "{"} ) A =set({")", "]", "}"} ) A ={"{": "}", "[": "]", "(": ")"} for i in range(len(a_ ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(a_ ) == 0 or (len(a_ ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(a_ ) == 0 def UpperCamelCase_ ( ) ->Dict: A =input("Enter sequence of brackets: " ) if is_balanced(a_ ): print(a_ , "is balanced" ) else: print(a_ , "is not balanced" ) if __name__ == "__main__": main()
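A compact sketch of the same stack-based bracket check, with illustrative names:

def is_balanced(s: str) -> bool:
    # Push openers; each closer must match the most recently pushed opener.
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack


assert is_balanced("{[()]}") is True
assert is_balanced("([)]") is False
assert is_balanced("(((") is False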
689
def UpperCamelCase_ ( a_ , a_ ) ->int: return int((input_a, input_a).count(0 ) != 0 ) def UpperCamelCase_ ( ) ->None: assert nand_gate(0 , 0 ) == 1 assert nand_gate(0 , 1 ) == 1 assert nand_gate(1 , 0 ) == 1 assert nand_gate(1 , 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
689
1
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" def __init__( self : int , snake_case__ : Dict=0.01 , snake_case__ : Optional[Any]=10_00 ): """simple docstring""" A =p_stop A =max_length def __iter__( self : Optional[Any] ): """simple docstring""" A =0 A =False while not stop and count < self.max_length: yield count count += 1 A =random.random() < self.p_stop class UpperCamelCase__( unittest.TestCase ): """simple docstring""" def _a ( self : str , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[Any]=False , snake_case__ : Any=True ): """simple docstring""" A =[ BatchSamplerShard(snake_case__ , 2 , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ ) for i in range(2 ) ] A =[list(snake_case__ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(snake_case__ ) for shard in batch_sampler_shards] , [len(snake_case__ ) for e in expected] ) self.assertListEqual(snake_case__ , snake_case__ ) def _a ( self : List[str] ): """simple docstring""" A =BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ ) A =BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ ) # Expected shouldn't change self.check_batch_sampler_shards(snake_case__ , snake_case__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. A =BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ ) A =BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. A =BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ ) A =BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. A =BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ ) A =BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ ) # Check the shards when the dataset is very small. 
A =BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ ) A =[[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(snake_case__ , snake_case__ ) A =BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ ) A =[[], []] self.check_batch_sampler_shards(snake_case__ , snake_case__ ) def _a ( self : Union[str, Any] ): """simple docstring""" A =BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ ) A =[ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ ) A =BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ ) # Expected shouldn't change self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ ) # Check the shards when the dataset is not a round multiple of batch size. A =BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ ) A =[ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ ) A =BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ ) A =[ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. A =BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ ) A =[ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ ) A =BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ ) A =[ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ ) # Check the shards when the dataset is very small. A =BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ ) A =[[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ ) A =BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ ) A =[[], []] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ ) def _a ( self : Any ): """simple docstring""" A =BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ ) A =BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ ) # Expected shouldn't change self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. 
A =BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ ) A =BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. A =BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ ) A =BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. A =BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ ) A =BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ ) A =[ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ ) # Check the shards when the dataset is very small. A =BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ ) A =[[[0, 1]], []] self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ ) A =BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ ) A =[[], []] self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ ) def _a ( self : int ): """simple docstring""" A =BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ ) A =[ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ ) A =BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ ) # Expected shouldn't change self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ ) # Check the shards when the dataset is not a round multiple of batch size. A =BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ ) A =[ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ ) A =BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ ) A =[ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. 
A =BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ ) A =[ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ ) A =BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ ) A =[ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ ) # Check the shards when the dataset is very small. A =BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ ) A =[[[0, 1]], []] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ ) A =BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ ) A =[[], []] self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ ) def _a ( self : List[str] ): """simple docstring""" A =[[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] A =[BatchSamplerShard(snake_case__ , 2 , snake_case__ , even_batches=snake_case__ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def _a ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int]=False , snake_case__ : Union[str, Any]=2 , snake_case__ : Dict=False ): """simple docstring""" random.seed(snake_case__ ) A =list(snake_case__ ) A =[ IterableDatasetShard( snake_case__ , batch_size=snake_case__ , drop_last=snake_case__ , num_processes=snake_case__ , process_index=snake_case__ , split_batches=snake_case__ , ) for i in range(snake_case__ ) ] A =[] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(snake_case__ ) iterable_dataset_lists.append(list(snake_case__ ) ) A =batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size A =iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) self.assertTrue(len(snake_case__ ) % shard_batch_size == 0 ) A =[] for idx in range(0 , len(snake_case__ ) , snake_case__ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(snake_case__ ) < len(snake_case__ ): reference += reference self.assertListEqual(snake_case__ , reference[: len(snake_case__ )] ) def _a ( self : int ): """simple docstring""" A =42 A =RandomIterableDataset() self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ ) self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ ) self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ ) self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ ) # Edge case with a very small dataset A =RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ ) self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ ) self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ ) self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ ) def _a ( self : int ): """simple docstring""" A =BatchSampler(range(16 ) , batch_size=4 , drop_last=snake_case__ ) A =SkipBatchSampler(snake_case__ , 2 ) self.assertListEqual(list(snake_case__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _a ( self : str ): """simple docstring""" A =SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _a ( self : List[Any] ): """simple docstring""" A =DataLoader(list(range(16 ) ) , batch_size=4 ) A =skip_first_batches(snake_case__ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _a ( self : int ): """simple docstring""" A =DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(snake_case__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(snake_case__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def _a ( self : List[Any] ): """simple docstring""" Accelerator() A =DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(snake_case__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(snake_case__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
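# Minimal sketch (standard PyTorch only, not part of the tests above) of the batch-sharding
# layout those tests assert on: batches are formed first, then process i of N takes every
# N-th batch; the even_batches variants additionally wrap around so both shards stay equal.
from torch.utils.data import BatchSampler, SequentialSampler

batches = list(BatchSampler(SequentialSampler(range(21)), batch_size=3, drop_last=False))
# batches == [[0, 1, 2], [3, 4, 5], ..., [18, 19, 20]]
shards = [batches[i::2] for i in range(2)]
assert shards[0] == [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
assert shards[1] == [[3, 4, 5], [9, 10, 11], [15, 16, 17]]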
689
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int: def count_of_possible_combinations(a_ ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(a_ ) def UpperCamelCase_ ( a_ , a_ , a_ ) ->int: def count_of_possible_combinations_with_dp_array( a_ , a_ ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] A =sum( count_of_possible_combinations_with_dp_array(target - item , a_ ) for item in array ) A =answer return answer A =[-1] * (target + 1) return count_of_possible_combinations_with_dp_array(a_ , a_ ) def UpperCamelCase_ ( a_ , a_ , a_ ) ->int: A =[0] * (target + 1) A =1 for i in range(1 , target + 1 ): for j in range(a_ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __a = 3 __a = 5 __a = [1, 2, 5] print(combination_sum_iv(n, array, target))
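# The bottom-up recurrence above, restated as a small self-contained sketch with
# descriptive (hypothetical) names: dp[t] counts ordered sequences of items summing to t.
def count_ordered_sums(array, target):
    dp = [1] + [0] * target  # one way to reach 0: the empty sum
    for total in range(1, target + 1):
        for item in array:
            if total - item >= 0:
                dp[total] += dp[total - item]
    return dp[target]

# dp over [1, 2, 5]: 1, 1, 2, 3, 5, 9 -> nine ordered combinations summing to 5
assert count_ordered_sums([1, 2, 5], 5) == 9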
689
1
from string import ascii_lowercase, ascii_uppercase def UpperCamelCase_ ( a_ ) ->str: if not sentence: return "" A =dict(zip(a_ , a_ ) ) return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:] if __name__ == "__main__": from doctest import testmod testmod()
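# The lookup table built above, spelled out as a quick sketch: only the first character
# is mapped through the lowercase->uppercase dict, everything else passes through unchanged.
from string import ascii_lowercase, ascii_uppercase

lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
assert lower_to_upper.get("h", "h") + "ello" == "Hello"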
689
from __future__ import annotations import math def UpperCamelCase_ ( a_ , a_ ) ->float: A =u for i in range(1 , a_ ): A =temp * (u - i) return temp def UpperCamelCase_ ( ) ->None: A =int(input("enter the number of values: " ) ) A =[] for _ in range(a_ ): y.append([] ) for i in range(a_ ): for j in range(a_ ): y[i].append(a_ ) A =0 print("enter the values of parameters in a list: " ) A =list(map(a_ , input().split() ) ) print("enter the values of corresponding parameters: " ) for i in range(a_ ): A =float(input() ) A =int(input("enter the value to interpolate: " ) ) A =(value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , a_ ): for j in range(n - i ): A =y[j + 1][i - 1] - y[j][i - 1] A =y[0][0] for i in range(1 , a_ ): summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ ) print(f'''the value at {value} is {summ}''' ) if __name__ == "__main__": main()
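# The same Newton forward-difference scheme as a self-contained sketch, on assumed sample
# points x = [0, 1, 2, 3] with y = x**2 (a quadratic, so the interpolated value is exact).
import math

x = [0.0, 1.0, 2.0, 3.0]
y_vals = [xi**2 for xi in x]
value = 1.5
u = (value - x[0]) / (x[1] - x[0])
diff = [y_vals[:]]  # forward-difference table, one row per order
for order in range(1, len(x)):
    diff.append([diff[-1][j + 1] - diff[-1][j] for j in range(len(x) - order)])
result, u_coef = diff[0][0], 1.0
for order in range(1, len(x)):
    u_coef *= u - (order - 1)
    result += u_coef * diff[order][0] / math.factorial(order)
assert abs(result - 1.5**2) < 1e-9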
689
1
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = "Wav2Vec2FeatureExtractor" _A = "AutoTokenizer" def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" super().__init__(snake_case__ , snake_case__ ) A =self.feature_extractor A =False @classmethod def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ): """simple docstring""" try: return super().from_pretrained(snake_case__ , **snake_case__ ) except OSError: warnings.warn( f'''Loading a tokenizer inside {cls.__name__} from a config that does not''' " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , snake_case__ , ) A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ ) A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ ) return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ ) def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*snake_case__ , **snake_case__ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) A =kwargs.pop("raw_speech" ) else: A =kwargs.pop("audio" , snake_case__ ) A =kwargs.pop("sampling_rate" , snake_case__ ) A =kwargs.pop("text" , snake_case__ ) if len(snake_case__ ) > 0: A =args[0] A =args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ ) if text is not None: A =self.tokenizer(snake_case__ , **snake_case__ ) if text is None: return inputs elif audio is None: return encodings else: A =encodings["input_ids"] return inputs def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*snake_case__ , **snake_case__ ) A =kwargs.pop("input_features" , snake_case__ ) A =kwargs.pop("labels" , snake_case__ ) if len(snake_case__ ) > 0: A =args[0] A =args[1:] if input_features is not None: A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ ) if labels is not None: A =self.tokenizer.pad(snake_case__ , **snake_case__ ) if labels is None: return input_features elif input_features is None: return labels else: A =labels["input_ids"] return input_features def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ): """simple docstring""" return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ): """simple docstring""" return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @contextmanager def _a ( self : int ): """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) A =True A =self.tokenizer yield A =self.feature_extractor A =False
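# Typical round trip with this processor class, left as comments since running it downloads
# weights; the checkpoint name is an assumption, not something this file pins down.
# from transformers import Wav2Vec2Processor
# processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
# inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors="pt")
# labels = processor(text="A TRANSCRIPT", return_tensors="pt").input_ids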
689
from cva import destroyAllWindows, imread, imshow, waitKey def UpperCamelCase_ ( a_ ) ->Any: # get the image height and width A , A =img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(a_ ): for j in range(a_ ): A =[255, 255, 255] - img[i][j] return img if __name__ == "__main__": # read original image __a = imread("""image_data/lena.jpg""", 1) # convert to its negative __a = convert_to_negative(img) # show result image imshow("""negative of original image""", img) waitKey(0) destroyAllWindows()
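# The per-pixel double loop above is O(H*W) in pure Python; NumPy computes the same
# negative in one vectorized subtraction. A sketch on a dummy image, so no file is needed.
import numpy as np

dummy = np.zeros((2, 2, 3), dtype=np.uint8)
negative = 255 - dummy
assert (negative == 255).all()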
689
1
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", } __a = { """vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""}, """merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""}, } __a = { """ctrl""": 2_5_6, } __a = { """Pregnancy""": 1_6_8_6_2_9, """Christianity""": 7_6_7_5, """Explain""": 1_0_6_4_2_3, """Fitness""": 6_3_4_4_0, """Saving""": 6_3_1_6_3, """Ask""": 2_7_1_7_1, """Ass""": 9_5_9_8_5, """Joke""": 1_6_3_5_0_9, """Questions""": 4_5_6_2_2, """Thoughts""": 4_9_6_0_5, """Retail""": 5_2_3_4_2, """Feminism""": 1_6_4_3_3_8, """Writing""": 1_1_9_9_2, """Atheism""": 1_9_2_2_6_3, """Netflix""": 4_8_6_1_6, """Computing""": 3_9_6_3_9, """Opinion""": 4_3_2_1_3, """Alone""": 4_4_9_6_7, """Funny""": 5_8_9_1_7, """Gaming""": 4_0_3_5_8, """Human""": 4_0_8_8, """India""": 1_3_3_1, """Joker""": 7_7_1_3_8, """Diet""": 3_6_2_0_6, """Legal""": 1_1_8_5_9, """Norman""": 4_9_3_9, """Tip""": 7_2_6_8_9, """Weight""": 5_2_3_4_3, """Movies""": 4_6_2_7_3, """Running""": 2_3_4_2_5, """Science""": 2_0_9_0, """Horror""": 3_7_7_9_3, """Confession""": 6_0_5_7_2, """Finance""": 1_2_2_5_0, """Politics""": 1_6_3_6_0, """Scary""": 1_9_1_9_8_5, """Support""": 1_2_6_5_4, """Technologies""": 3_2_5_1_6, """Teenage""": 6_6_1_6_0, """Event""": 3_2_7_6_9, """Learned""": 6_7_4_6_0, """Notion""": 1_8_2_7_7_0, """Wikipedia""": 3_7_5_8_3, """Books""": 6_6_6_5, """Extract""": 7_6_0_5_0, """Confessions""": 1_0_2_7_0_1, """Conspiracy""": 7_5_9_3_2, """Links""": 6_3_6_7_4, """Narcissus""": 1_5_0_4_2_5, """Relationship""": 5_4_7_6_6, """Relationships""": 1_3_4_7_9_6, """Reviews""": 4_1_6_7_1, """News""": 4_2_5_6, """Translation""": 2_6_8_2_0, """multilingual""": 1_2_8_4_0_6, } def UpperCamelCase_ ( a_ ) ->List[str]: A =set() A =word[0] for char in word[1:]: pairs.add((prev_char, char) ) A =char A =set(a_ ) return pairs class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = CONTROL_CODES def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ): """simple docstring""" super().__init__(unk_token=snake_case__ , **snake_case__ ) with open(snake_case__ , encoding="utf-8" ) as vocab_handle: A =json.load(snake_case__ ) A ={v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: A =merges_handle.read().split("\n" )[1:-1] A =[tuple(merge.split() ) for merge in merges] A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) A ={} @property def _a ( self : str ): """simple docstring""" return len(self.encoder ) def _a ( self : List[Any] ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : int , snake_case__ : Any ): """simple docstring""" if token in self.cache: return self.cache[token] A =tuple(snake_case__ ) A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) A =get_pairs(snake_case__ ) if not pairs: return token while True: A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break A , A =bigram A =[] A =0 while i < len(snake_case__ ): try: 
A =word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A =j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A =tuple(snake_case__ ) A =new_word if len(snake_case__ ) == 1: break else: A =get_pairs(snake_case__ ) A ="@@ ".join(snake_case__ ) A =word[:-4] A =word return word def _a ( self : List[str] , snake_case__ : int ): """simple docstring""" A =[] A =re.findall(R"\S+\n?" , snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def _a ( self : List[str] , snake_case__ : Optional[int] ): """simple docstring""" return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def _a ( self : Union[str, Any] , snake_case__ : str ): """simple docstring""" return self.decoder.get(snake_case__ , self.unk_token ) def _a ( self : Optional[int] , snake_case__ : Any ): """simple docstring""" A =" ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(snake_case__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(snake_case__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" ) A =0 with open(snake_case__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) A =token_index writer.write(" ".join(snake_case__ ) + "\n" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
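# The symbol-pair extraction that the BPE merge loop above depends on, as a minimal
# standalone sketch (same logic as get_pairs, written with plain names).
def adjacent_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

assert adjacent_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}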
689
1
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class UpperCamelCase__( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=snake_case__ , ) assert hasattr(self , "env" ) def _a ( self : List[Any] , snake_case__ : List[str] ): """simple docstring""" A =f'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}''' # distributed data settings A ={"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=snake_case__ , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="py36" , ) def _a ( self : Optional[Any] , snake_case__ : Dict ): """simple docstring""" TrainingJobAnalytics(snake_case__ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(2,)] ) def _a ( self : Any , snake_case__ : Union[str, Any] ): """simple docstring""" A =self.create_estimator(snake_case__ ) # run training estimator.fit() # result dataframe A =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis A =list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) A =list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping A =( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_99_99 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile: json.dump({"train_time": 
train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
689
def UpperCamelCase_ ( a_ , a_ ) ->list[int]: A =int(a_ ) # Initialize Result A =[] # Traverse through all denomination for denomination in reversed(a_ ): # Find denominations while int(a_ ) >= int(a_ ): total_value -= int(a_ ) answer.append(a_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": __a = [] __a = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): __a = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) __a = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter __a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0] __a = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(F'''Following is minimal change for {value}: ''') __a = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
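# Worked example of the greedy scan above on value 93 with the default Indian
# denominations: reversed() walks from the largest note down, subtracting while it fits.
denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
remaining, answer = 93, []
for d in reversed(denominations):
    while remaining >= d:
        remaining -= d
        answer.append(d)
assert answer == [50, 20, 20, 2, 1] and remaining == 0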
689
1
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __a = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase__( unittest.TestCase ): """simple docstring""" _A = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _A = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _A = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _A = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def _a ( self : Union[str, Any] ): """simple docstring""" A =pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" ) A =text_classifier("This is great !" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "LABEL_0", "score": 0.504}] ) A =text_classifier("This is great !" , top_k=2 ) self.assertEqual( nested_simplify(snake_case__ ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] ) A =text_classifier(["This is great !", "This is bad"] , top_k=2 ) self.assertEqual( nested_simplify(snake_case__ ) , [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ] , ) A =text_classifier("This is great !" , top_k=1 ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "LABEL_0", "score": 0.504}] ) # Legacy behavior A =text_classifier("This is great !" , return_all_scores=snake_case__ ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "LABEL_0", "score": 0.504}] ) A =text_classifier("This is great !" , return_all_scores=snake_case__ ) self.assertEqual( nested_simplify(snake_case__ ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] ) A =text_classifier(["This is great !", "Something else"] , return_all_scores=snake_case__ ) self.assertEqual( nested_simplify(snake_case__ ) , [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ] , ) A =text_classifier(["This is great !", "Something else"] , return_all_scores=snake_case__ ) self.assertEqual( nested_simplify(snake_case__ ) , [ {"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_0", "score": 0.504}, ] , ) @require_torch def _a ( self : str ): """simple docstring""" import torch A =pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , ) A =text_classifier("This is great !" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "LABEL_0", "score": 0.504}] ) @require_tf def _a ( self : List[Any] ): """simple docstring""" A =pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" ) A =text_classifier("This is great !" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "LABEL_0", "score": 0.504}] ) @slow @require_torch def _a ( self : Any ): """simple docstring""" A =pipeline("text-classification" ) A =text_classifier("This is great !" 
) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "POSITIVE", "score": 1.0}] ) A =text_classifier("This is bad !" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "NEGATIVE", "score": 1.0}] ) A =text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "POSITIVE", "score": 0.988}] ) @slow @require_tf def _a ( self : Dict ): """simple docstring""" A =pipeline("text-classification" , framework="tf" ) A =text_classifier("This is great !" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "POSITIVE", "score": 1.0}] ) A =text_classifier("This is bad !" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "NEGATIVE", "score": 1.0}] ) A =text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": "POSITIVE", "score": 0.988}] ) def _a ( self : Tuple , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple ): """simple docstring""" A =TextClassificationPipeline(model=snake_case__ , tokenizer=snake_case__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def _a ( self : Dict , snake_case__ : int , snake_case__ : Optional[Any] ): """simple docstring""" A =text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 A ="HuggingFace is in" A =text_classifier(snake_case__ ) self.assertEqual(nested_simplify(snake_case__ ) , [{"label": ANY(snake_case__ ), "score": ANY(snake_case__ )}] ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) A =["HuggingFace is in ", "Paris is in France"] A =text_classifier(snake_case__ ) self.assertEqual( nested_simplify(snake_case__ ) , [{"label": ANY(snake_case__ ), "score": ANY(snake_case__ )}, {"label": ANY(snake_case__ ), "score": ANY(snake_case__ )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format A =text_classifier(snake_case__ , top_k=snake_case__ ) A =len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(snake_case__ ) , [[{"label": ANY(snake_case__ ), "score": ANY(snake_case__ )}] * N, [{"label": ANY(snake_case__ ), "score": ANY(snake_case__ )}] * N] , ) A ={"text": "HuggingFace is in ", "text_pair": "Paris is in France"} A =text_classifier(snake_case__ ) self.assertEqual( nested_simplify(snake_case__ ) , {"label": ANY(snake_case__ ), "score": ANY(snake_case__ )} , ) self.assertTrue(outputs["label"] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. A =[["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(snake_case__ ): text_classifier(snake_case__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility A =text_classifier([[["HuggingFace is in ", "Paris is in France"]]] ) self.assertEqual( nested_simplify(snake_case__ ) , [{"label": ANY(snake_case__ ), "score": ANY(snake_case__ )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
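# The pipeline call these tests exercise, left as comments (running it downloads the tiny
# test checkpoint named above):
# from transformers import pipeline
# classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
# classifier("This is great !", top_k=2)
# # -> [{"label": "LABEL_0", "score": ...}, {"label": "LABEL_1", "score": ...}]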
689
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = ["""model.decoder.embed_positions.weights"""] def UpperCamelCase_ ( a_ ) ->List[str]: if "emb" in name: A =name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: A =name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: A =name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: A =name.replace("linear1" , "fc1" ) if "linear2" in name: A =name.replace("linear2" , "fc2" ) if "norm1" in name: A =name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: A =name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: A =name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: A =name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: A =name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]: A =list(state_dict.keys() ) A ={} for key in keys: A =state_dict.pop(a_ ) A =rename_keys(a_ ) if "in_proj_weight" in key: # split fused qkv proj A =val[:hidden_size, :] A =val[hidden_size : 2 * hidden_size, :] A =val[-hidden_size:, :] elif "enc_to_dec_proj" in key: A =val else: A =val return state_dict, enc_dec_proj_state_dict def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig: if checkpoint == "small": # default config values A =1024 A =24 A =16 elif checkpoint == "medium": A =1536 A =48 A =24 elif checkpoint == "large": A =2048 A =48 A =32 else: raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) A =MusicgenDecoderConfig( hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , ) return config @torch.no_grad() def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]: A =MusicGen.get_pretrained(a_ , device=a_ ) A =decoder_config_from_checkpoint(a_ ) A =fairseq_model.lm.state_dict() A , A =rename_state_dict( a_ , hidden_size=decoder_config.hidden_size ) A =TaEncoderModel.from_pretrained("t5-base" ) A =EncodecModel.from_pretrained("facebook/encodec_32khz" ) A =MusicgenForCausalLM(a_ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection A , A =decoder.load_state_dict(a_ , strict=a_ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(a_ ) if len(a_ ) > 0: raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' ) if len(a_ ) > 0: raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(a_ ) # check we can do a forward pass A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) A 
=input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): A =model(input_ids=a_ , decoder_input_ids=a_ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor A =AutoTokenizer.from_pretrained("t5-base" ) A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ ) # set the appropriate bos/pad token ids A =2048 A =2048 # set other default generation config params A =int(30 * audio_encoder.config.frame_rate ) A =True A =3.0 if pytorch_dump_folder is not None: Path(a_ ).mkdir(exist_ok=a_ ) logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(a_ ) processor.save_pretrained(a_ ) if repo_id: logger.info(f'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(a_ ) processor.push_to_hub(a_ ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) __a = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
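# The fused in_proj split performed in the state-dict renaming above, isolated as a
# sketch: a (3*H, H) attention weight is cut into equal q/k/v blocks along dim 0.
import torch

hidden_size = 4
fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
q = fused[:hidden_size, :]
k = fused[hidden_size : 2 * hidden_size, :]
v = fused[-hidden_size:, :]
assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)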
689
1
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { """nvidia/segformer-b0-finetuned-ade-512-512""": ( """https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json""" ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = "segformer" def __init__( self : str , snake_case__ : Optional[Any]=3 , snake_case__ : Dict=4 , snake_case__ : Optional[int]=[2, 2, 2, 2] , snake_case__ : List[str]=[8, 4, 2, 1] , snake_case__ : Tuple=[32, 64, 1_60, 2_56] , snake_case__ : Dict=[7, 3, 3, 3] , snake_case__ : List[str]=[4, 2, 2, 2] , snake_case__ : List[str]=[1, 2, 5, 8] , snake_case__ : Optional[Any]=[4, 4, 4, 4] , snake_case__ : List[Any]="gelu" , snake_case__ : Tuple=0.0 , snake_case__ : Union[str, Any]=0.0 , snake_case__ : int=0.1 , snake_case__ : Optional[Any]=0.02 , snake_case__ : Optional[int]=0.1 , snake_case__ : Dict=1E-6 , snake_case__ : int=2_56 , snake_case__ : int=2_55 , **snake_case__ : Union[str, Any] , ): """simple docstring""" super().__init__(**snake_case__ ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be" " removed, as the behaviour will default to that of reshape_last_stage = True." , snake_case__ , ) A =num_channels A =num_encoder_blocks A =depths A =sr_ratios A =hidden_sizes A =patch_sizes A =strides A =mlp_ratios A =num_attention_heads A =hidden_act A =hidden_dropout_prob A =attention_probs_dropout_prob A =classifier_dropout_prob A =initializer_range A =drop_path_rate A =layer_norm_eps A =decoder_hidden_size A =kwargs.get("reshape_last_stage" , snake_case__ ) A =semantic_loss_ignore_index class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = version.parse("1.11" ) @property def _a ( self : Optional[Any] ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _a ( self : Tuple ): """simple docstring""" return 1E-4 @property def _a ( self : str ): """simple docstring""" return 12
689
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def UpperCamelCase_ ( a_ ) ->Tuple: A =FileLock(str(tmpdir / "foo.lock" ) ) A =FileLock(str(tmpdir / "foo.lock" ) ) A =0.01 with locka.acquire(): with pytest.raises(a_ ): A =time.time() locka.acquire(a_ ) assert time.time() - _start > timeout def UpperCamelCase_ ( a_ ) ->List[Any]: A ="a" * 1000 + ".lock" A =FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(".lock" ) assert not locka._lock_file.endswith(a_ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 A =FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(a_ ): locka.acquire(0 )
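# The timeout contract these tests pin down, sketched in comments with the same API:
# a held lock makes a second acquire(timeout=...) raise Timeout instead of blocking forever.
# lock_a = FileLock("/tmp/foo.lock"); lock_b = FileLock("/tmp/foo.lock")
# with lock_a.acquire():
#     lock_b.acquire(timeout=0.01)   # raises Timeout after ~0.01s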
689
1
__a = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def UpperCamelCase_ ( ) ->None: A =input("Enter message: " ) A =input("Enter key [alphanumeric]: " ) A =input("Encrypt/Decrypt [e/d]: " ) if mode.lower().startswith("e" ): A ="encrypt" A =encrypt_message(a_ , a_ ) elif mode.lower().startswith("d" ): A ="decrypt" A =decrypt_message(a_ , a_ ) print(f'''\n{mode.title()}ed message:''' ) print(a_ ) def UpperCamelCase_ ( a_ , a_ ) ->str: return translate_message(a_ , a_ , "encrypt" ) def UpperCamelCase_ ( a_ , a_ ) ->str: return translate_message(a_ , a_ , "decrypt" ) def UpperCamelCase_ ( a_ , a_ , a_ ) ->str: A =[] A =0 A =key.upper() for symbol in message: A =LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(a_ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(a_ ): A =0 else: translated.append(a_ ) return "".join(a_ ) if __name__ == "__main__": main()
689
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { """configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""], """tokenization_roformer""": ["""RoFormerTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""RoFormerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """RoFormerForCausalLM""", """RoFormerForMaskedLM""", """RoFormerForMultipleChoice""", """RoFormerForQuestionAnswering""", """RoFormerForSequenceClassification""", """RoFormerForTokenClassification""", """RoFormerLayer""", """RoFormerModel""", """RoFormerPreTrainedModel""", """load_tf_weights_in_roformer""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRoFormerForCausalLM""", """TFRoFormerForMaskedLM""", """TFRoFormerForMultipleChoice""", """TFRoFormerForQuestionAnswering""", """TFRoFormerForSequenceClassification""", """TFRoFormerForTokenClassification""", """TFRoFormerLayer""", """TFRoFormerModel""", """TFRoFormerPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxRoFormerForMaskedLM""", """FlaxRoFormerForMultipleChoice""", """FlaxRoFormerForQuestionAnswering""", """FlaxRoFormerForSequenceClassification""", """FlaxRoFormerForTokenClassification""", """FlaxRoFormerModel""", """FlaxRoFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, 
FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __a = """\ """ __a = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ __a = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__( datasets.Metric ): """simple docstring""" def _a ( self : Optional[Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "input_texts": datasets.Value("string" ), } ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , ) def _a ( self : Any , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int = 16 , snake_case__ : bool = True , snake_case__ : Dict=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": A ="cuda" else: A ="cuda" if torch.cuda.is_available() else "cpu" A =AutoModelForCausalLM.from_pretrained(snake_case__ ) A =model.to(snake_case__ ) A =AutoTokenizer.from_pretrained(snake_case__ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: A =list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(snake_case__ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" A =model.config.max_length - 1 else: A =model.config.max_length A =tokenizer( snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , return_tensors="pt" , return_attention_mask=snake_case__ , ).to(snake_case__ ) A =encodings["input_ids"] A =encodings["attention_mask"] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." A =[] A =CrossEntropyLoss(reduction="none" ) for start_index in logging.tqdm(range(0 , len(snake_case__ ) , snake_case__ ) ): A =min(start_index + batch_size , len(snake_case__ ) ) A =encoded_texts[start_index:end_index] A =attn_masks[start_index:end_index] if add_start_token: A =torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(snake_case__ ) A =torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) A =torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(snake_case__ ), attn_mask] , dim=1 ) A =encoded_batch with torch.no_grad(): A =model(snake_case__ , attention_mask=snake_case__ ).logits A =out_logits[..., :-1, :].contiguous() A =labels[..., 1:].contiguous() A =attn_mask[..., 1:].contiguous() A =torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , snake_case__ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(snake_case__ )}
689
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF

import datasets


__a = """\
@inproceedings{popovic-2015-chrf,
    title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
    author = \"Popovi{\'c}, Maja\",
    booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
    month = sep,
    year = \"2015\",
    address = \"Lisbon, Portugal\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://aclanthology.org/W15-3049\",
    doi = \"10.18653/v1/W15-3049\",
    pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
    title = \"chr{F}++: words helping character n-grams\",
    author = \"Popovi{\'c}, Maja\",
    booktitle = \"Proceedings of the Second Conference on Machine Translation\",
    month = sep,
    year = \"2017\",
    address = \"Copenhagen, Denmark\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://aclanthology.org/W17-4770\",
    doi = \"10.18653/v1/W17-4770\",
    pages = \"612--618\",
}
@inproceedings{post-2018-call,
    title = \"A Call for Clarity in Reporting {BLEU} Scores\",
    author = \"Post, Matt\",
    booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
    month = oct,
    year = \"2018\",
    address = \"Belgium, Brussels\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://www.aclweb.org/anthology/W18-6319\",
    pages = \"186--191\",
}
"""

__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment.
We use the implementation that is already present in sacrebleu.

The implementation here is slightly different from sacrebleu in terms of the required input format.
The length of the references and hypotheses lists need to be the same, so you may need to transpose
your references compared to sacrebleu's required input format.
See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""

__a = """
Produces ChrF(++) scores for hypotheses given reference translations.

Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.

Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision

Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
        >>> chrf = datasets.load_metric(\"chrf\")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}

    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
        >>> chrf = datasets.load_metric(\"chrf\")
        >>> results = chrf.compute(predictions=prediction,
        ... references=reference,
        ... word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}

    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
        >>> chrf = datasets.load_metric(\"chrf\")
        >>> results = chrf.compute(predictions=prediction,
        ... references=reference,
        ... word_order=2,
        ... lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
    """simple docstring"""

    def _a ( self : Any ):
        """simple docstring"""
        if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`."
            )
        return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[ "https://github.com/m-popovic/chrF", ] , )

    def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ):
        """simple docstring"""
        A =len(references[0] )
        if any(len(snake_case__ ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        A =[[refs[i] for refs in references] for i in range(snake_case__ )]
        A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        A =sb_chrf.corpus_score(snake_case__ , snake_case__ )

        return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
689
1
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ):
    """simple docstring"""

    _A = DanceDiffusionPipeline
    _A = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    _A = PipelineTesterMixin.required_optional_params - { "callback", "latents", "callback_steps", "output_type", "num_images_per_prompt", }
    _A = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    _A = False
    _A = False

    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        torch.manual_seed(0 )
        A =UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=snake_case__ , use_timestep_embedding=snake_case__ , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
        A =IPNDMScheduler()

        A ={ "unet": unet, "scheduler": scheduler, }
        return components

    def _a ( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any]=0 ):
        """simple docstring"""
        if str(snake_case__ ).startswith("mps" ):
            A =torch.manual_seed(snake_case__ )
        else:
            A =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
        A ={ "batch_size": 1, "generator": generator, "num_inference_steps": 4, }
        return inputs

    def _a ( self : List[Any] ):
        """simple docstring"""
        A ="cpu"  # ensure determinism for the device-dependent torch.Generator
        A =self.get_dummy_components()
        A =DanceDiffusionPipeline(**snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        A =self.get_dummy_inputs(snake_case__ )
        A =pipe(**snake_case__ )
        A =output.audios

        A =audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        A =np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def _a ( self : str ):
        """simple docstring"""
        return super().test_save_load_local()

    @skip_mps
    def _a ( self : str ):
        """simple docstring"""
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    @skip_mps
    def _a ( self : Optional[int] ):
        """simple docstring"""
        return super().test_save_load_optional_components()

    @skip_mps
    def _a ( self : Dict ):
        """simple docstring"""
        return super().test_attention_slicing_forward_pass()

    def _a ( self : List[Any] ):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""

    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _a ( self : Any ):
        """simple docstring"""
        A =torch_device

        A =DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        A =torch.manual_seed(0 )
        A =pipe(generator=snake_case__ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
        A =output.audios

        A =audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        A =np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    def _a ( self : List[str] ):
        """simple docstring"""
        A =torch_device

        A =DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        A =torch.manual_seed(0 )
        A =pipe(generator=snake_case__ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
        A =output.audios

        A =audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        A =np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
689
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


__a = {
    """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTBigCodeForSequenceClassification""",
        """GPTBigCodeForTokenClassification""",
        """GPTBigCodeForCausalLM""",
        """GPTBigCodeModel""",
        """GPTBigCodePreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


__a = logging.get_logger(__name__)

__a = {"""vocab_file""": """vocab.txt"""}

__a = {
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}

__a = {
    """openbmb/cpm-ant-10b""": 1_0_2_4,
}


def UpperCamelCase_ ( a_ ) ->List[Any]:
    A =collections.OrderedDict()
    with open(a_ , "r" , encoding="utf-8" ) as reader:
        A =reader.readlines()
    for index, token in enumerate(a_ ):
        A =token.rstrip("\n" )
        A =index
    return vocab


class UpperCamelCase__( lowerCAmelCase__ ):
    """simple docstring"""

    def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ):
        """simple docstring"""
        A =vocab
        A =unk_token
        A =max_input_chars_per_word

    def _a ( self : Optional[Any] , snake_case__ : Tuple ):
        """simple docstring"""
        A =list(snake_case__ )
        if len(snake_case__ ) > self.max_input_chars_per_word:
            return [self.unk_token]

        A =0
        A =[]
        while start < len(snake_case__ ):
            A =len(snake_case__ )
            A =None
            while start < end:
                A ="".join(chars[start:end] )
                if substr in self.vocab:
                    A =substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(snake_case__ )
                A =end

        return sub_tokens


class UpperCamelCase__( lowerCAmelCase__ ):
    """simple docstring"""

    _A = VOCAB_FILES_NAMES
    _A = PRETRAINED_VOCAB_FILES_MAP
    _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A = ["input_ids", "attention_mask"]
    _A = False

    def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
        """simple docstring"""
        requires_backends(self , ["jieba"] )
        super().__init__( bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
        A =bod_token
        A =eod_token
        A =load_vocab(snake_case__ )
        A =self.encoder[space_token]
        A =self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
        A ={v: k for k, v in self.encoder.items()}
        A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )

    @property
    def _a ( self : Dict ):
        """simple docstring"""
        return self.encoder[self.bod_token]

    @property
    def _a ( self : List[str] ):
        """simple docstring"""
        return self.encoder[self.eod_token]

    @property
    def _a ( self : Any ):
        """simple docstring"""
        return self.encoder["\n"]

    @property
    def _a ( self : List[str] ):
        """simple docstring"""
        return len(self.encoder )

    def _a ( self : Tuple ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )

    def _a ( self : Tuple , snake_case__ : int ):
        """simple docstring"""
        A =[]
        for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
        return output_tokens

    def _a ( self : List[Any] , snake_case__ : List[Any] , **snake_case__ : str ):
        """simple docstring"""
        A =[i for i in token_ids if i >= 0]
        A =[ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ]
        return super()._decode(snake_case__ , **snake_case__ )

    def _a ( self : List[Any] , snake_case__ : int ):
        """simple docstring"""
        return token in self.encoder

    def _a ( self : Optional[Any] , snake_case__ : List[str] ):
        """simple docstring"""
        return "".join(snake_case__ )

    def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
        """simple docstring"""
        return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )

    def _a ( self : Dict , snake_case__ : Optional[int] ):
        """simple docstring"""
        return self.decoder.get(snake_case__ , self.unk_token )

    def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
        """simple docstring"""
        if os.path.isdir(snake_case__ ):
            A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            A =(filename_prefix + "-" if filename_prefix else "") + save_directory
        A =0
        if " " in self.encoder:
            A =self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            A =self.encoder["\n"]
            del self.encoder["\n"]
        A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
        with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" )
                    A =token_index
                writer.write(token + "\n" )
                index += 1
        return (vocab_file,)

    def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ):
        """simple docstring"""
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a

    def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )

        if token_ids_a is not None:
            return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
        return [1] + ([0] * len(snake_case__ ))
689
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import (
        TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        FlaubertConfig,
        TFFlaubertForMultipleChoice,
        TFFlaubertForQuestionAnsweringSimple,
        TFFlaubertForSequenceClassification,
        TFFlaubertForTokenClassification,
        TFFlaubertModel,
        TFFlaubertWithLMHeadModel,
    )


class UpperCamelCase__:
    """simple docstring"""

    def __init__( self : List[str] , snake_case__ : Optional[int] , ):
        """simple docstring"""
        A =parent
        A =13
        A =7
        A =True
        A =True
        A =True
        A =True
        A =True
        A =False
        A =False
        A =False
        A =2
        A =99
        A =0
        A =32
        A =2
        A =4
        A =0.1
        A =0.1
        A =5_12
        A =16
        A =2
        A =0.02
        A =3
        A =4
        A ="last"
        A =True
        A =None
        A =0

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )

        A =None
        if self.use_input_lengths:
            A =( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 )  # small variation of seq_length

        A =None
        if self.use_token_type_ids:
            A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )

        A =None
        A =None
        A =None
        if self.use_labels:
            A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
            A =ids_tensor([self.batch_size] , self.num_choices )

        A =FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )

        return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, )

    def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ):
        """simple docstring"""
        A =TFFlaubertModel(config=snake_case__ )
        A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        A =model(snake_case__ )

        A =[input_ids, input_mask]
        A =model(snake_case__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ):
        """simple docstring"""
        A =TFFlaubertWithLMHeadModel(snake_case__ )
        A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        A =model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , ):
        """simple docstring"""
        A =TFFlaubertForQuestionAnsweringSimple(snake_case__ )
        A ={"input_ids": input_ids, "lengths": input_lengths}
        A =model(snake_case__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , ):
        """simple docstring"""
        A =TFFlaubertForSequenceClassification(snake_case__ )
        A ={"input_ids": input_ids, "lengths": input_lengths}
        A =model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , ):
        """simple docstring"""
        A =self.num_labels
        A =TFFlaubertForTokenClassification(config=snake_case__ )
        A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        A =model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , ):
        """simple docstring"""
        A =self.num_choices
        A =TFFlaubertForMultipleChoice(config=snake_case__ )
        A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
        A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
        A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
        A ={ "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, }
        A =model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _a ( self : Any ):
        """simple docstring"""
        A =self.prepare_config_and_inputs()
        ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) =config_and_inputs

        A ={ "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, }
        return config, inputs_dict


@require_tf
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
    """simple docstring"""

    _A = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () )
    _A = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () )  # TODO (PVP): Check other models whether language generation is also applicable
    _A = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} )
    _A = False
    _A = False

    def _a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str ):
        """simple docstring"""
        if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _a ( self : Optional[int] ):
        """simple docstring"""
        A =TFFlaubertModelTester(self )
        A =ConfigTester(self , config_class=snake_case__ , emb_dim=37 )

    def _a ( self : Any ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def _a ( self : str ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*snake_case__ )

    def _a ( self : List[Any] ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )

    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*snake_case__ )

    def _a ( self : List[Any] ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )

    def _a ( self : List[Any] ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ )

    def _a ( self : Tuple ):
        """simple docstring"""
        A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ )

    @slow
    def _a ( self : Tuple ):
        """simple docstring"""
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A =TFFlaubertModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )


@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""

    @slow
    def _a ( self : Tuple ):
        """simple docstring"""
        A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
        A =tf.convert_to_tensor( [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , )  # "J'aime flaubert !"
        A =model(snake_case__ )[0]
        A =tf.TensorShape((1, 8, 5_12) )
        self.assertEqual(output.shape , snake_case__ )
        # compare the actual values for a slice.
        A =tf.convert_to_tensor( [ [ [-1.8_768_773, -1.566_555, 0.27_072_418], [-1.6_920_038, -0.5_873_505, 1.9_329_599], [-2.9_563_985, -1.6_993_835, 1.7_972_052], ] ] , dtype=tf.floataa , )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
689
1
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ ):
    """simple docstring"""

    @register_to_config
    def __init__( self : List[str] , *, snake_case__ : int = 4 , snake_case__ : int = 7_68 , snake_case__ : int , snake_case__ : Dict , ):
        """simple docstring"""
        super().__init__()

        A =nn.Parameter(torch.zeros(snake_case__ ) )

        # parameters for additional clip time embeddings
        A =nn.Linear(snake_case__ , snake_case__ )
        A =nn.Linear(snake_case__ , snake_case__ )

        # parameters for encoder hidden states
        A =clip_extra_context_tokens
        A =nn.Linear( snake_case__ , self.clip_extra_context_tokens * cross_attention_dim )
        A =nn.Linear(snake_case__ , snake_case__ )
        A =nn.LayerNorm(snake_case__ )

    def _a ( self : int , *, snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str ):
        """simple docstring"""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            A =image_embeddings.shape[0]
            A =self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            A =classifier_free_guidance_embeddings.expand( snake_case__ , -1 )
            A =torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        A =prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        A =self.embedding_proj(snake_case__ )
        A =self.clip_image_embeddings_project_to_time_embeddings(snake_case__ )
        A =time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        A =self.clip_extra_context_tokens_proj(snake_case__ )
        A =clip_extra_context_tokens.reshape(snake_case__ , -1 , self.clip_extra_context_tokens )
        A =clip_extra_context_tokens.permute(0 , 2 , 1 )

        A =self.encoder_hidden_states_proj(snake_case__ )
        A =self.text_encoder_hidden_states_norm(snake_case__ )
        A =torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )

        return text_encoder_hidden_states, additive_clip_time_embeddings
689
from __future__ import annotations


def UpperCamelCase_ ( a_ ) ->None:
    create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )


def UpperCamelCase_ ( a_ , a_ , a_ , a_ , ) ->None:
    if index == len(a_ ):
        print(a_ )
        return

    for i in range(len(a_ ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            A =True
            create_state_space_tree(a_ , a_ , index + 1 , a_ )
            current_sequence.pop()
            A =False


__a = [3, 1, 2, 4]
generate_all_permutations(sequence)

__a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
689
1
def UpperCamelCase_ ( a_ ) ->Optional[int]:
    A =len(a_ )
    for i in range(length - 1 ):
        A =i
        for k in range(i + 1 , a_ ):
            if collection[k] < collection[least]:
                A =k
        if least != i:
            A , A =(collection[i], collection[least])
    return collection


if __name__ == "__main__":
    __a = input("""Enter numbers separated by a comma:\n""").strip()
    __a = [int(item) for item in user_input.split(""",""")]
    print(selection_sort(unsorted))
689
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNetaDConditionModel,
    UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _a ( self : Tuple ):
        """simple docstring"""
        torch.manual_seed(0 )
        A =UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return model

    @property
    def _a ( self : Dict ):
        """simple docstring"""
        torch.manual_seed(0 )
        A =UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
        return model

    @property
    def _a ( self : Dict ):
        """simple docstring"""
        torch.manual_seed(0 )
        A =AutoencoderKL( sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
        A =UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return vqvae, unet

    @slow
    def _a ( self : int ):
        """simple docstring"""
        A ="cpu"  # ensure determinism for the device-dependent torch.Generator
        A =Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )

        A =DDPMScheduler()
        A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(generator=snake_case__ , steps=4 )
        A =output.audios[0]
        A =output.images[0]

        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
        A =output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] )

        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
        A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0

        A =Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )

        A =DDIMScheduler()
        A =self.dummy_vqvae_and_unet
        A =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        np.random.seed(0 )
        A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
        A =output.images[0]

        assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] )

        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0

        A =self.dummy_unet_condition
        A =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        np.random.seed(0 )
        A =torch.rand((1, 1, 10) )
        A =pipe(generator=snake_case__ , encoding=snake_case__ )
        A =output.images[0]
        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0


@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""

    def _a ( self : Optional[int] ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        A =torch_device

        A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )

        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(generator=snake_case__ )
        A =output.audios[0]
        A =output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
689
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__a = {
    """configuration_bigbird_pegasus""": [
        """BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """BigBirdPegasusConfig""",
        """BigBirdPegasusOnnxConfig""",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        """BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BigBirdPegasusForCausalLM""",
        """BigBirdPegasusForConditionalGeneration""",
        """BigBirdPegasusForQuestionAnswering""",
        """BigBirdPegasusForSequenceClassification""",
        """BigBirdPegasusModel""",
        """BigBirdPegasusPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
import os
import sys
import unittest


__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")


class UpperCamelCase__( unittest.TestCase ):
    """simple docstring"""

    def _a ( self : List[str] ):
        """simple docstring"""
        A =find_backend(" if not is_torch_available():" )
        self.assertEqual(snake_case__ , "torch" )

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
        self.assertEqual(snake_case__ , "torch_and_transformers" )

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        A =find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(snake_case__ , "torch_and_transformers_and_onnx" )

    def _a ( self : List[Any] ):
        """simple docstring"""
        A =read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , snake_case__ )
        self.assertIn("torch_and_transformers" , snake_case__ )
        self.assertIn("flax_and_transformers" , snake_case__ )
        self.assertIn("torch_and_transformers_and_onnx" , snake_case__ )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel" , objects["torch"] )
        self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
        self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
        self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
        self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
        self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )

    def _a ( self : Dict ):
        """simple docstring"""
        A =create_dummy_object("CONSTANT" , "'torch'" )
        self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )

        A =create_dummy_object("function" , "'torch'" )
        self.assertEqual( snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )

        A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        A =create_dummy_object("FakeClass" , "'torch'" )
        self.assertEqual(snake_case__ , snake_case__ )

    def _a ( self : Tuple ):
        """simple docstring"""
        A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
        A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] , snake_case__ )
689
1
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


__a = 1_6
__a = 3_2


def UpperCamelCase_ ( a_ , a_ = 16 ) ->Dict:
    A =AutoTokenizer.from_pretrained("bert-base-cased" )
    A =load_dataset("glue" , "mrpc" )

    def tokenize_function(a_ ):
        # max_length=None => use the model max length (it's actually the default)
        A =tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a_ , max_length=a_ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        A =datasets.map( a_ , batched=a_ , remove_columns=["idx", "sentence1", "sentence2"] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    A =tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(a_ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        A =128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            A =16
        elif accelerator.mixed_precision != "no":
            A =8
        else:
            A =None

        return tokenizer.pad( a_ , padding="longest" , max_length=a_ , pad_to_multiple_of=a_ , return_tensors="pt" , )

    # Instantiate dataloaders.
    A =DataLoader( tokenized_datasets["train"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
    A =DataLoader( tokenized_datasets["validation"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    __a = mocked_dataloaders  # noqa: F811


def UpperCamelCase_ ( a_ , a_ ) ->List[str]:
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , a_ ) == "1":
        A =2
    # Initialize accelerator
    A =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    A =config["lr"]
    A =int(config["num_epochs"] )
    A =int(config["seed"] )
    A =int(config["batch_size"] )
    A =evaluate.load("glue" , "mrpc" )

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=a_ )
    def inner_training_loop(a_ ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(a_ )

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        A =AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=a_ )

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        A =model.to(accelerator.device )

        # Instantiate optimizer
        A =AdamW(params=model.parameters() , lr=a_ )
        A , A =get_dataloaders(a_ , a_ )

        # Instantiate scheduler
        A =get_linear_schedule_with_warmup( optimizer=a_ , num_warmup_steps=100 , num_training_steps=(len(a_ ) * num_epochs) , )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        A , A , A , A , A =accelerator.prepare( a_ , a_ , a_ , a_ , a_ )

        # Now we train the model
        for epoch in range(a_ ):
            model.train()
            for step, batch in enumerate(a_ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                A =model(**a_ )
                A =outputs.loss
                accelerator.backward(a_ )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(a_ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    A =model(**a_ )
                A =outputs.logits.argmax(dim=-1 )
                A , A =accelerator.gather_for_metrics((predictions, batch["labels"]) )
                metric.add_batch( predictions=a_ , references=a_ , )

            A =metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''' , a_ )

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def UpperCamelCase_ ( ) ->Union[str, Any]:
    A =argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=a_ , default=a_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    A =parser.parse_args()
    A ={"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(a_ , a_ )


if __name__ == "__main__":
    main()
689
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class UpperCamelCase__:
    """simple docstring"""

    _A = 42
    _A = None
    _A = None


__a = namedtuple("""CoinsDistribResult""", """moves excess""")


def UpperCamelCase_ ( a_ ) ->int:
    if root is None:
        return 0

    # Validation
    def count_nodes(a_ ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(a_ ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(a_ ) != count_coins(a_ ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation
    def get_distrib(a_ ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )

        A , A =get_distrib(node.left )
        A , A =get_distrib(node.right )

        A =1 - left_distrib_excess
        A =1 - right_distrib_excess

        A =( left_distrib_moves + right_distrib_moves + abs(a_ ) + abs(a_ ) )
        A =node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(a_ , a_ )

    return get_distrib(a_ )[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
689
1
from __future__ import annotations

import math


class UpperCamelCase__:
    """simple docstring"""

    def __init__( self : Dict , snake_case__ : int ):
        """simple docstring"""
        A =size
        # approximate the overall size of segment tree with given value
        A =[0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        A =[0 for i in range(0 , 4 * size )]
        A =[0 for i in range(0 , 4 * size )]  # flag for lazy update

    def _a ( self : List[str] , snake_case__ : int ):
        """simple docstring"""
        return idx * 2

    def _a ( self : Any , snake_case__ : int ):
        """simple docstring"""
        return idx * 2 + 1

    def _a ( self : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : list[int] ):
        """simple docstring"""
        if left_element == right_element:
            A =a[left_element - 1]
        else:
            A =(left_element + right_element) // 2
            self.build(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ )
            self.build(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ )
            A =max( self.segment_tree[self.left(snake_case__ )] , self.segment_tree[self.right(snake_case__ )] )

    def _a ( self : Optional[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int ):
        """simple docstring"""
        if self.flag[idx] is True:
            A =self.lazy[idx]
            A =False
            if left_element != right_element:
                A =self.lazy[idx]
                A =self.lazy[idx]
                A =True
                A =True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            A =val
            if left_element != right_element:
                A =val
                A =val
                A =True
                A =True
            return True
        A =(left_element + right_element) // 2
        self.update(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        self.update(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        A =max( self.segment_tree[self.left(snake_case__ )] , self.segment_tree[self.right(snake_case__ )] )
        return True

    def _a ( self : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int ):
        """simple docstring"""
        if self.flag[idx] is True:
            A =self.lazy[idx]
            A =False
            if left_element != right_element:
                A =self.lazy[idx]
                A =self.lazy[idx]
                A =True
                A =True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        A =(left_element + right_element) // 2
        A =self.query(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        A =self.query(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ , snake_case__ )
        return max(snake_case__ , snake_case__ )

    def __str__( self : Any ):
        """simple docstring"""
        return str([self.query(1 , 1 , self.size , snake_case__ , snake_case__ ) for i in range(1 , self.size + 1 )] )


if __name__ == "__main__":
    __a = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
    __a = 1_5
    __a = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 1_1))
    print(segt.query(1, 1, size, 7, 1_2))
    segt.update(1, 1, size, 1, 3, 1_1_1)
    print(segt.query(1, 1, size, 1, 1_5))
    segt.update(1, 1, size, 7, 8, 2_3_5)
    print(segt)
689
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


__a = logging.get_logger(__name__)

__a = {"""vocab_file""": """vocab.txt"""}

__a = {
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}

__a = {
    """openbmb/cpm-ant-10b""": 1_0_2_4,
}


def UpperCamelCase_ ( a_ ) ->List[Any]:
    A =collections.OrderedDict()
    with open(a_ , "r" , encoding="utf-8" ) as reader:
        A =reader.readlines()
    for index, token in enumerate(a_ ):
        A =token.rstrip("\n" )
        A =index
    return vocab


class UpperCamelCase__( lowerCAmelCase__ ):
    """simple docstring"""

    def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ):
        """simple docstring"""
        A =vocab
        A =unk_token
        A =max_input_chars_per_word

    def _a ( self : Optional[Any] , snake_case__ : Tuple ):
        """simple docstring"""
        A =list(snake_case__ )
        if len(snake_case__ ) > self.max_input_chars_per_word:
            return [self.unk_token]

        A =0
        A =[]
        while start < len(snake_case__ ):
            A =len(snake_case__ )
            A =None
            while start < end:
                A ="".join(chars[start:end] )
                if substr in self.vocab:
                    A =substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(snake_case__ )
                A =end

        return sub_tokens


class UpperCamelCase__( lowerCAmelCase__ ):
    """simple docstring"""

    _A = VOCAB_FILES_NAMES
    _A = PRETRAINED_VOCAB_FILES_MAP
    _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A = ["input_ids", "attention_mask"]
    _A = False

    def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
        """simple docstring"""
        requires_backends(self , ["jieba"] )
        super().__init__( bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
        A =bod_token
        A =eod_token
        A =load_vocab(snake_case__ )
        A =self.encoder[space_token]
        A =self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
        A ={v: k for k, v in self.encoder.items()}
        A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )

    @property
    def _a ( self : Dict ):
        """simple docstring"""
        return self.encoder[self.bod_token]

    @property
    def _a ( self : List[str] ):
        """simple docstring"""
        return self.encoder[self.eod_token]

    @property
    def _a ( self : Any ):
        """simple docstring"""
        return self.encoder["\n"]

    @property
    def _a ( self : List[str] ):
        """simple docstring"""
        return len(self.encoder )

    def _a ( self : Tuple ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )

    def _a ( self : Tuple , snake_case__ : int ):
        """simple docstring"""
        A =[]
        for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
        return output_tokens

    def _a ( self : List[Any] , snake_case__ : List[Any] , **snake_case__ : str ):
        """simple docstring"""
        A =[i for i in token_ids if i >= 0]
        A =[ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ]
        return super()._decode(snake_case__ , **snake_case__ )

    def _a ( self : List[Any] , snake_case__ : int ):
        """simple docstring"""
        return token in self.encoder

    def _a ( self : Optional[Any] , snake_case__ : List[str] ):
        """simple docstring"""
        return "".join(snake_case__ )

    def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
        """simple docstring"""
        return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )

    def _a ( self : Dict , snake_case__ : Optional[int] ):
        """simple docstring"""
        return self.decoder.get(snake_case__ , self.unk_token )

    def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
        """simple docstring"""
        if os.path.isdir(snake_case__ ):
            A =os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            A =(filename_prefix + "-" if filename_prefix else "") + save_directory
        A =0
        if " " in self.encoder:
            A =self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            A =self.encoder["\n"]
            del self.encoder["\n"]
        A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
        with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" )
                    A =token_index
                writer.write(token + "\n" )
                index += 1
        return (vocab_file,)

    def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ):
        """simple docstring"""
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a

    def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )

        if token_ids_a is not None:
            return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
        return [1] + ([0] * len(snake_case__ ))
689
1
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey


def UpperCamelCase_ ( a_ , a_ , a_ , a_ , a_ , a_ ) ->np.ndarray:
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        A =ksize + 1
    A =np.zeros((ksize, ksize) , dtype=np.floataa )

    # each value
    for y in range(a_ ):
        for x in range(a_ ):
            # distance from center
            A =x - ksize // 2
            A =y - ksize // 2

            # degree to radiant
            A =theta / 180 * np.pi
            A =np.cos(_theta )
            A =np.sin(_theta )

            # get kernel x
            A =cos_theta * px + sin_theta * py

            # get kernel y
            A =-sin_theta * px + cos_theta * py

            # fill kernel
            A =np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    __a = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    __a = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    __a = np.zeros(gray.shape[:2])
    for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]:
        __a = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_aa)
    __a = out / out.max() * 2_5_5
    __a = out.astype(np.uinta)

    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
689
def UpperCamelCase_ ( a_ = 6008_5147_5143 ) ->int: try: A =int(a_ ) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int." ) if n <= 0: raise ValueError("Parameter n must be greater than or equal to one." ) A =2 A =0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 A =i while n % i == 0: A =n // i i += 1 return int(a_ ) if __name__ == "__main__": print(F'''{solution() = }''')
689
1
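# Hedged sketch (an assumption-laden rewrite, not the dataset's code): a
# self-contained NumPy version of the Gabor kernel above, handy for inspecting
# kernel values without OpenCV; parameter names follow the standard Gabor
# definition (ksize, sigma, theta in degrees, lambd, psi, gamma).
import numpy as np

def gabor_kernel_sketch(ksize, sigma, theta, lambd, psi, gamma):
    if ksize % 2 == 0:
        ksize += 1  # the kernel size has to be odd, as in the sample above
    half = ksize // 2
    ys, xs = np.mgrid[-half : half + 1, -half : half + 1]
    t = np.deg2rad(theta)
    x_r = np.cos(t) * xs + np.sin(t) * ys   # rotated x coordinate
    y_r = -np.sin(t) * xs + np.cos(t) * ys  # rotated y coordinate
    return np.exp(-(x_r**2 + gamma**2 * y_r**2) / (2 * sigma**2)) * np.cos(
        2 * np.pi * x_r / lambd + psi
    )

kernel = gabor_kernel_sketch(10, 8, 30, 10, 0, 0)
print(kernel.shape)  # (11, 11) after the odd-size adjustment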
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu __a = [ """EAGER""", """AOT_EAGER""", """INDUCTOR""", """NVFUSER""", """AOT_NVFUSER""", """AOT_CUDAGRAPHS""", """OFI""", """FX2TRT""", """ONNXRT""", """IPEX""", ] def UpperCamelCase_ ( a_ , a_=None , a_=None , a_=None ) ->List[str]: A =True while ask_again: A =input(a_ ) try: if default is not None and len(a_ ) == 0: return default return convert_value(a_ ) if convert_value is not None else result except Exception: if error_message is not None: print(a_ ) def UpperCamelCase_ ( a_ , a_=[] , a_=None , a_=0 ) ->Optional[int]: A =BulletMenu(a_ , a_ ) A =menu.run(default_choice=a_ ) return convert_value(a_ ) if convert_value is not None else result def UpperCamelCase_ ( a_ ) ->Optional[int]: A =int(a_ ) return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] ) def UpperCamelCase_ ( a_ ) ->str: A =int(a_ ) return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] ) def UpperCamelCase_ ( a_ ) ->Union[str, Any]: A =int(a_ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def UpperCamelCase_ ( a_ ) ->int: A =int(a_ ) return PrecisionType(["no", "fp16", "bf16", "fp8"][value] ) def UpperCamelCase_ ( a_ ) ->Any: A =int(a_ ) return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] ) def UpperCamelCase_ ( a_ ) ->str: return {"yes": True, "no": False}[value.lower()] class UpperCamelCase__( argparse.RawDescriptionHelpFormatter ): """simple docstring""" def _a ( self : int , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" A =super()._format_usage(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A =usage.replace("<command> [<args>] " , "" ) return usage
689
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class UpperCamelCase__( lowerCAmelCase__ ): """simple docstring""" _A = "Wav2Vec2FeatureExtractor" _A = "AutoTokenizer" def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" super().__init__(snake_case__ , snake_case__ ) A =self.feature_extractor A =False @classmethod def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ): """simple docstring""" try: return super().from_pretrained(snake_case__ , **snake_case__ ) except OSError: warnings.warn( f'''Loading a tokenizer inside {cls.__name__} from a config that does not''' " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , snake_case__ , ) A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ ) A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ ) return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ ) def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*snake_case__ , **snake_case__ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) A =kwargs.pop("raw_speech" ) else: A =kwargs.pop("audio" , snake_case__ ) A =kwargs.pop("sampling_rate" , snake_case__ ) A =kwargs.pop("text" , snake_case__ ) if len(snake_case__ ) > 0: A =args[0] A =args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ ) if text is not None: A =self.tokenizer(snake_case__ , **snake_case__ ) if text is None: return inputs elif audio is None: return encodings else: A =encodings["input_ids"] return inputs def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*snake_case__ , **snake_case__ ) A =kwargs.pop("input_features" , snake_case__ ) A =kwargs.pop("labels" , snake_case__ ) if len(snake_case__ ) > 0: A =args[0] A =args[1:] if input_features is not None: A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ ) if labels is not None: A =self.tokenizer.pad(snake_case__ , **snake_case__ ) if labels is None: return input_features elif input_features is None: return labels else: A =labels["input_ids"] return input_features def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ): """simple docstring""" return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ): """simple docstring""" return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @contextmanager def _a ( self : int ): """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) A =True A =self.tokenizer yield A =self.feature_extractor A =False
689
1
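# Hedged analogue (illustrative only; the class and attribute names below are
# invented): the deprecated `as_target_processor` pattern in the processor
# sample above boils down to a context manager that temporarily swaps the
# active sub-processor. A try/finally is added here so the swap is undone even
# if the body raises.
from contextlib import contextmanager

class DualProcessorSketch:
    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer
        self.current_processor = feature_extractor

    @contextmanager
    def as_target_processor(self):
        self.current_processor = self.tokenizer
        try:
            yield
        finally:
            self.current_processor = self.feature_extractor

proc = DualProcessorSketch(feature_extractor="fe", tokenizer="tok")
with proc.as_target_processor():
    assert proc.current_processor == "tok"
assert proc.current_processor == "fe"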
import datasets from .evaluate import evaluate __a = """\ @article{hendrycks2021cuad, title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review}, author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball}, journal={arXiv preprint arXiv:2103.06268}, year={2021} } """ __a = """ This metric wraps the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD). Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions. """ __a = """ Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction. references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the CUAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly matches the gold answer) 'f1': The F-score of predicted tokens versus the gold answer 'aupr': Area Under the Precision-Recall curve 'prec_at_80_recall': Precision at 80% recall 'prec_at_90_recall': Precision at 90% recall Examples: >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> cuad_metric = datasets.load_metric(\"cuad\") >>> results = cuad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__( datasets.Metric ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": { "id": datasets.Value("string" ), "prediction_text": datasets.features.Sequence(datasets.Value("string" ) ), }, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , ) def _a ( self : int , snake_case__ : Tuple , snake_case__ : Optional[int] ): """simple docstring""" A ={prediction["id"]: prediction["prediction_text"] for prediction in predictions} A =[ { "paragraphs": [ { "qas": [ { "answers": 
[{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] A =evaluate(dataset=snake_case__ , predictions=snake_case__ ) return score
689
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
689
1
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class UpperCamelCase__: """simple docstring""" _A = 42 _A = None _A = None __a = namedtuple("""CoinsDistribResult""", """moves excess""") def UpperCamelCase_ ( a_ ) ->int: if root is None: return 0 # Validation def count_nodes(a_ ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(a_ ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(a_ ) != count_coins(a_ ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(a_ ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) A , A =get_distrib(node.left ) A , A =get_distrib(node.right ) A =1 - left_distrib_excess A =1 - right_distrib_excess A =( left_distrib_moves + right_distrib_moves + abs(a_ ) + abs(a_ ) ) A =node.data - coins_to_left - coins_to_right return CoinsDistribResult(a_ , a_ ) return get_distrib(a_ )[0] if __name__ == "__main__": import doctest doctest.testmod()
689
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __a = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""") __a = ( subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split() ) __a = """|""".join(sys.argv[1:]) __a = re.compile(rF'''^({joined_dirs}).*?\.py$''') __a = [x for x in modified_files if regex.match(x)] print(""" """.join(relevant_modified_files), end="""""")
689
1
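# Hedged demo (self-contained; the Node class below is a stand-in for the
# obfuscated dataclass in the coin-distribution sample above): on the classic
# three-node tree -- root holds 3 coins, both leaves hold 0 -- the recursion
# returns 2 moves, one coin pushed down to each leaf.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Node:
    data: int
    left: Optional["Node"] = None
    right: Optional["Node"] = None

def distribute_coins_sketch(root):
    def walk(node):
        if node is None:
            return 0, 1  # (moves, excess): an empty subtree is already balanced
        left_moves, left_excess = walk(node.left)
        right_moves, right_excess = walk(node.right)
        to_left, to_right = 1 - left_excess, 1 - right_excess
        moves = left_moves + right_moves + abs(to_left) + abs(to_right)
        return moves, node.data - to_left - to_right
    return walk(root)[0]

assert distribute_coins_sketch(Node(3, Node(0), Node(0))) == 2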
def UpperCamelCase_ ( a_ , a_ ) ->float: return price * (1 + tax_rate) if __name__ == "__main__": print(F'''{price_plus_tax(1_0_0, 0.25) = }''') print(F'''{price_plus_tax(125.50, 0.05) = }''')
689
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = { """configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["""MobileViTFeatureExtractor"""] __a = ["""MobileViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MobileViTForImageClassification""", """MobileViTForSemanticSegmentation""", """MobileViTModel""", """MobileViTPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ """TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFMobileViTForImageClassification""", """TFMobileViTForSemanticSegmentation""", """TFMobileViTModel""", """TFMobileViTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
689
1
from math import isclose, sqrt def UpperCamelCase_ ( a_ , a_ , a_ ) ->tuple[float, float, float]: A =point_y / 4 / point_x A =2 * normal_gradient / (1 + normal_gradient * normal_gradient) A =(1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) A =(sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 A =outgoing_gradient**2 + 4 A =2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) A =(point_y - outgoing_gradient * point_x) ** 2 - 100 A =( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) A =( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point A =x_minus if isclose(a_ , a_ ) else x_plus A =point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def UpperCamelCase_ ( a_ = 1.4 , a_ = -9.6 ) ->int: A =0 A =first_x_coord A =first_y_coord A =(10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): A , A , A =next_point(a_ , a_ , a_ ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(F'''{solution() = }''')
689
def UpperCamelCase_ ( a_ , a_ ) ->int: return int((input_a, input_a).count(0 ) != 0 ) def UpperCamelCase_ ( ) ->None: assert nand_gate(0 , 0 ) == 1 assert nand_gate(0 , 1 ) == 1 assert nand_gate(1 , 0 ) == 1 assert nand_gate(1 , 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
689
1
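# Hedged invariant check (a self-contained re-derivation, not the dataset's
# code): every point produced by the reflection step in the laser sample above
# must stay on the ellipse 4x^2 + y^2 = 100; this asserts that for one bounce
# starting from the entry point (1.4, -9.6).
from math import isclose, sqrt

def reflect_once(x, y, incoming):
    normal = y / (4 * x)  # gradient of the normal at (x, y)
    s = 2 * normal / (1 + normal**2)
    c = (1 - normal**2) / (1 + normal**2)
    outgoing = (s - c * incoming) / (c + s * incoming)
    a = outgoing**2 + 4
    b = 2 * outgoing * (y - outgoing * x)
    const = (y - outgoing * x) ** 2 - 100
    x_minus = (-b - sqrt(b**2 - 4 * a * const)) / (2 * a)
    x_plus = (-b + sqrt(b**2 - 4 * a * const)) / (2 * a)
    nx = x_minus if isclose(x_plus, x) else x_plus  # skip the root we came from
    return nx, y + outgoing * (nx - x)

nx, ny = reflect_once(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
assert isclose(4 * nx**2 + ny**2, 100, rel_tol=1e-6)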
def UpperCamelCase_ ( a_ ) ->int: A =1 for i in range(1 , num + 1 ): fact *= i return fact def UpperCamelCase_ ( a_ ) ->int: A =0 while number > 0: A =number % 10 sum_of_digits += last_digit A =number // 10 # Removing the last_digit from the given number return sum_of_digits def UpperCamelCase_ ( a_ = 100 ) ->int: A =factorial(a_ ) A =split_and_add(a_ ) return result if __name__ == "__main__": print(solution(int(input("""Enter the Number: """).strip())))
689
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int: def count_of_possible_combinations(a_ ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(a_ ) def UpperCamelCase_ ( a_ , a_ , a_ ) ->int: def count_of_possible_combinations_with_dp_array( a_ , a_ ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] A =sum( count_of_possible_combinations_with_dp_array(target - item , a_ ) for item in array ) A =answer return answer A =[-1] * (target + 1) return count_of_possible_combinations_with_dp_array(a_ , a_ ) def UpperCamelCase_ ( a_ , a_ , a_ ) ->int: A =[0] * (target + 1) A =1 for i in range(1 , target + 1 ): for j in range(a_ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __a = 3 __a = 5 __a = [1, 2, 5] print(combination_sum_iv(n, array, target))
689
1
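# Hedged worked example (array and target copied from the sample above; the
# helper name is invented): the bottom-up DP fills dp[i] with the number of
# ordered ways to reach sum i, so for [1, 2, 5] and target 5 the full table is
# [1, 1, 2, 3, 5, 9] and the answer is dp[5] == 9.
def combination_sum_iv_table(array, target):
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to make 0: choose nothing
    for i in range(1, target + 1):
        for item in array:
            if i - item >= 0:
                dp[i] += dp[i - item]
    return dp

assert combination_sum_iv_table([1, 2, 5], 5) == [1, 1, 2, 3, 5, 9]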
import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets __a = """\ @inproceedings{popovic-2015-chrf, title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\", author = \"Popovi{\'c}, Maja\", booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\", month = sep, year = \"2015\", address = \"Lisbon, Portugal\", publisher = \"Association for Computational Linguistics\", url = \"https://aclanthology.org/W15-3049\", doi = \"10.18653/v1/W15-3049\", pages = \"392--395\", } @inproceedings{popovic-2017-chrf, title = \"chr{F}++: words helping character n-grams\", author = \"Popovi{\'c}, Maja\", booktitle = \"Proceedings of the Second Conference on Machine Translation\", month = sep, year = \"2017\", address = \"Copenhagen, Denmark\", publisher = \"Association for Computational Linguistics\", url = \"https://aclanthology.org/W17-4770\", doi = \"10.18653/v1/W17-4770\", pages = \"612--618\", } @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __a = """\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. """ __a = """ Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: 'score' (float): The chrF (chrF++) score, 'char_order' (int): The character n-gram order, 'word_order' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, 'beta' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... lowercase=True) >>> print(results) {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__( datasets.Metric ): """simple docstring""" def _a ( self : Any ): """simple docstring""" if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[ "https://github.com/m-popovic/chrF", ] , ) def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ): """simple docstring""" A =len(references[0] ) if any(len(snake_case__ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A =[[refs[i] for refs in references] for i in range(snake_case__ )] A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A =sb_chrf.corpus_score(snake_case__ , snake_case__ ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
689
from __future__ import annotations import math def UpperCamelCase_ ( a_ , a_ ) ->float: A =u for i in range(1 , a_ ): A =temp * (u - i) return temp def UpperCamelCase_ ( ) ->None: A =int(input("enter the number of values: " ) ) A =[] for _ in range(a_ ): y.append([] ) for i in range(a_ ): for j in range(a_ ): y[i].append(a_ ) A =0 print("enter the values of parameters in a list: " ) A =list(map(a_ , input().split() ) ) print("enter the values of corresponding parameters: " ) for i in range(a_ ): A =float(input() ) A =int(input("enter the value to interpolate: " ) ) A =(value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , a_ ): for j in range(n - i ): A =y[j + 1][i - 1] - y[j][i - 1] A =y[0][0] for i in range(1 , a_ ): summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ ) print(f'''the value at {value} is {summ}''' ) if __name__ == "__main__": main()
689
1
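# Hedged non-interactive variant (the sample table below is made up): the
# interpolation sample above reads its table from stdin; this sketch runs the
# same Newton forward-difference scheme on fixed data, interpolating
# f(x) = x^2 at x = 2.5 and getting 6.25 exactly.
import math

def newton_forward_sketch(x, y0, value):
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i, v in enumerate(y0):
        table[i][0] = float(v)
    for j in range(1, n):  # forward difference table, column by column
        for i in range(n - j):
            table[i][j] = table[i + 1][j - 1] - table[i][j - 1]
    u = (value - x[0]) / (x[1] - x[0])
    total, u_term = table[0][0], 1.0
    for j in range(1, n):
        u_term *= u - (j - 1)  # u * (u-1) * ... * (u-j+1)
        total += u_term * table[0][j] / math.factorial(j)
    return total

assert abs(newton_forward_sketch([0, 1, 2, 3], [0, 1, 4, 9], 2.5) - 6.25) < 1e-9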
from math import isqrt, loga def UpperCamelCase_ ( a_ ) ->list[int]: A =[True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , a_ , a_ ): A =False return [i for i in range(2 , a_ ) if is_prime[i]] def UpperCamelCase_ ( a_ = 80_0800 , a_ = 80_0800 ) ->int: A =degree * loga(a_ ) A =int(a_ ) A =calculate_prime_numbers(a_ ) A =0 A =0 A =len(a_ ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F'''{solution() = }''')
689
from cva import destroyAllWindows, imread, imshow, waitKey def UpperCamelCase_ ( a_ ) ->Any: # getting number of pixels in the image A , A =img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(a_ ): for j in range(a_ ): A =[255, 255, 255] - img[i][j] return img if __name__ == "__main__": # read original image __a = imread("""image_data/lena.jpg""", 1) # convert to its negative __a = convert_to_negative(img) # show result image imshow("""negative of original image""", img) waitKey(0) destroyAllWindows()
689
1
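# Hedged mini-check (small bound and prime list chosen for illustration): the
# two-pointer loop in the hybrid-integer sample above counts prime pairs
# (p, q), p < q, with q*log2(p) + p*log2(q) <= bound; an O(n^2) brute force
# agrees on a tiny instance. A `right > left` guard is added here for safety.
from math import log2

def count_pairs_two_pointer(primes, bound):
    count, left, right = 0, 0, len(primes) - 1
    while left < right:
        while right > left and (
            primes[right] * log2(primes[left]) + primes[left] * log2(primes[right]) > bound
        ):
            right -= 1
        count += right - left
        left += 1
    return count

def count_pairs_brute(primes, bound):
    return sum(
        1
        for i in range(len(primes))
        for j in range(i + 1, len(primes))
        if primes[j] * log2(primes[i]) + primes[i] * log2(primes[j]) <= bound
    )

primes = [2, 3, 5, 7, 11, 13]
assert count_pairs_two_pointer(primes, 50.0) == count_pairs_brute(primes, 50.0) == 12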