Dataset schema (each record below follows these five fields):

    column                    type    range
    code                      string  lengths 81 - 54k
    code_codestyle            int64   0 - 721
    style_context             string  lengths 91 - 41.9k
    style_context_codestyle   int64   0 - 699
    label                     int64   0 - 1
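Each record pairs an obfuscated source file ("code") with a reference file ("style_context"); the bare integers between the code samples below appear to be the remaining fields of each record (code_codestyle, style_context_codestyle, label). As a minimal sketch of inspecting such a dataset with the Hugging Face "datasets" library; the repository id "user/code-style-pairs" is a hypothetical placeholder, not the dataset's real name:

# Minimal sketch, assuming the five-field schema above; the dataset id is a placeholder.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical id
row = ds[0]
print(row["code"][:120])      # obfuscated source string (length 81 to 54k)
print(row["code_codestyle"])  # integer style id (0 to 721)
print(row["label"])           # binary label (0 or 1)
print(Counter(ds["label"]))   # label balance across the split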
import json
import os
import re
import sys
import urllib.request

import requests
from bsa import BeautifulSoup

_UpperCAmelCase : Any = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def A ( lowercase = "dhaka" , lowercase = 5 ) -> int:
    '''simple docstring'''
    UpperCamelCase = min(lowercase , 50 )  # Prevent abuse!
    UpperCamelCase = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }
    UpperCamelCase = requests.get('https://www.google.com/search' , params=lowercase , headers=lowercase )
    UpperCamelCase = BeautifulSoup(html.text , 'html.parser' )
    UpperCamelCase = ''.join(
        re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
    UpperCamelCase = json.dumps(lowercase )
    UpperCamelCase = json.loads(lowercase )
    UpperCamelCase = re.findall(
        R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' ,
        lowercase ,
    )
    if not matched_google_image_data:
        return 0
    UpperCamelCase = re.sub(
        R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' ,
        '' ,
        str(lowercase ) ,
    )
    UpperCamelCase = re.findall(
        R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' ,
        lowercase ,
    )
    for index, fixed_full_res_image in enumerate(lowercase ):
        if index >= max_images:
            return index
        UpperCamelCase = bytes(lowercase , 'ascii' ).decode(
            'unicode-escape' )
        UpperCamelCase = bytes(lowercase , 'ascii' ).decode(
            'unicode-escape' )
        UpperCamelCase = urllib.request.build_opener()
        UpperCamelCase = [
            (
                'User-Agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
            )
        ]
        urllib.request.install_opener(lowercase )
        UpperCamelCase = f'''query_{query.replace(' ' , '_' )}'''
        if not os.path.exists(lowercase ):
            os.makedirs(lowercase )
        urllib.request.urlretrieve(  # noqa: S310
            lowercase , f'''{path_name}/original_size_img_{index}.jpg''' )
    return index


if __name__ == "__main__":
    try:
        _UpperCAmelCase : str = download_images_from_google_query(sys.argv[1])
        print(F'''{image_count} images were downloaded to disk.''')
    except IndexError:
        print("Please provide a search term.")
        raise
3
from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets _UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n" _UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n" _UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def A ( lowercase , lowercase ) -> List[str]: '''simple docstring''' return float((preds == labels).mean() ) def A ( lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = simple_accuracy(lowercase , lowercase ) UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) ) return { "accuracy": acc, "f1": fa, } def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] ) UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' 
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , ) def __UpperCamelCase ( self , A_ , A_ ) -> Any: """simple docstring""" if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(A_ , A_ )} elif self.config_name == "stsb": return pearson_and_spearman(A_ , A_ ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(A_ , A_ ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(A_ , A_ )} else: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
3
1
import os

import pytest
from attr import dataclass

_UpperCAmelCase : str = "us-east-1"  # defaults region


@dataclass
class lowercase:
    __lowercase : str
    __lowercase : Union[str, Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    __lowercase : Any = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5_500,
    }
    __lowercase : List[str] = {**hyperparameters, "max_steps": 1_000}

    @property
    def __UpperCamelCase ( self ) -> str:
        """simple docstring"""
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def __UpperCamelCase ( self ) -> str:
        """simple docstring"""
        return F'''{self.framework}-transfromers-test'''

    @property
    def __UpperCamelCase ( self ) -> str:
        """simple docstring"""
        return F'''./tests/sagemaker/scripts/{self.framework}'''

    @property
    def __UpperCamelCase ( self ) -> str:
        """simple docstring"""
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope='class' )
def A ( lowercase ) -> Optional[Any]:
    '''simple docstring'''
    UpperCamelCase = SageMakerTestEnvironment(framework=request.cls.framework )
3
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _UpperCAmelCase : str = "scheduler_config.json" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Tuple = 1 __lowercase : int = 2 __lowercase : List[Any] = 3 __lowercase : str = 4 __lowercase : Optional[Any] = 5 @dataclass class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : jnp.ndarray class lowercase : __lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME __lowercase : Dict = ["dtype"] __lowercase : List[Any] = [] __lowercase : Dict = True @classmethod def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = cls.load_config( pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , ) UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ ) if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ): UpperCamelCase = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str: """simple docstring""" self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ ) @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return self._get_compatibles() @classmethod def __UpperCamelCase ( cls ) -> int: """simple docstring""" UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) ) UpperCamelCase = importlib.import_module(__name__.split('.' )[0] ) UpperCamelCase = [ getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ ) ] return compatible_classes def A ( lowercase , lowercase ) -> jnp.ndarray: '''simple docstring''' assert len(lowercase ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase ) def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray: '''simple docstring''' def alpha_bar(lowercase ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 UpperCamelCase = [] for i in range(lowercase ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) ) return jnp.array(lowercase , dtype=lowercase ) @flax.struct.dataclass class lowercase : __lowercase : jnp.ndarray __lowercase : jnp.ndarray __lowercase : jnp.ndarray @classmethod def __UpperCamelCase ( cls , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = scheduler.config if config.trained_betas is not None: UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
UpperCamelCase = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) UpperCamelCase = 1.0 - betas UpperCamelCase = jnp.cumprod(A_ , axis=0 ) return cls( alphas=A_ , betas=A_ , alphas_cumprod=A_ , ) def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = state.alphas_cumprod UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def A ( lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
3
1
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin _UpperCAmelCase : List[Any] = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : List[Any] = SpeechTaTokenizer __lowercase : Union[str, Any] = False __lowercase : Any = True def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase = SpeechTaTokenizer(A_ ) UpperCamelCase = AddedToken('<mask>' , lstrip=A_ , rstrip=A_ ) UpperCamelCase = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = 'this is a test' UpperCamelCase = 'this is a test' return input_text, output_text def __UpperCamelCase ( self , A_ , A_=False , A_=20 , A_=5 ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.get_input_output_texts(A_ ) UpperCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ ) UpperCamelCase = tokenizer.decode(A_ , clean_up_tokenization_spaces=A_ ) return text, ids def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = '<pad>' UpperCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-4] , 'œ' ) self.assertEqual(vocab_keys[-2] , '<mask>' ) self.assertEqual(vocab_keys[-1] , '<ctc_blank>' ) self.assertEqual(len(A_ ) , 81 ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.get_tokenizers(do_lower_case=A_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): UpperCamelCase = tokenizer.vocab_size UpperCamelCase = len(A_ ) self.assertNotEqual(A_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) UpperCamelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd'] UpperCamelCase = tokenizer.add_tokens(A_ ) UpperCamelCase = tokenizer.vocab_size UpperCamelCase = len(A_ ) self.assertNotEqual(A_ , 0 ) self.assertEqual(A_ , A_ ) self.assertEqual(A_ , len(A_ ) ) self.assertEqual(A_ , all_size + len(A_ ) ) UpperCamelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=A_ ) self.assertGreaterEqual(len(A_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) UpperCamelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} UpperCamelCase = tokenizer.add_special_tokens(A_ ) UpperCamelCase = tokenizer.vocab_size UpperCamelCase = len(A_ ) self.assertNotEqual(A_ , 0 ) 
self.assertEqual(A_ , A_ ) self.assertEqual(A_ , len(A_ ) ) self.assertEqual(A_ , all_size_a + len(A_ ) ) UpperCamelCase = tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=A_ ) self.assertGreaterEqual(len(A_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" pass def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" pass def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.get_tokenizer() UpperCamelCase = tokenizer.tokenize('This is a test' ) # fmt: off self.assertListEqual(A_ , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(A_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( A_ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ ) # fmt: off self.assertListEqual(A_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual( A_ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) @slow def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" # Use custom sequence because this tokenizer does not handle numbers. UpperCamelCase = [ 'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ' 'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ' 'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ' 'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.', 'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ' 'conditioning on both left and right context in all layers.', 'The quick brown fox jumps over the lazy dog.', ] # fmt: off UpperCamelCase = { 'input_ids': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=A_ , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=A_ , )
3
from abc import ABC, abstractmethod from typing import List, Optional class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self ) -> Optional[Any]: """simple docstring""" # test for the above condition self.test() def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = 0 UpperCamelCase = False while not completed: if counter == 1: self.reset() UpperCamelCase = self.advance() if not self.does_advance(A_ ): raise Exception( 'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' ) UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ ) counter += 1 if counter > 10_000: raise Exception('update() does not fulfill the constraint.' ) if self.remaining() != 0: raise Exception('Custom Constraint is not defined correctly.' ) @abstractmethod def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> Any: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> Any: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCamelCase = token_ids UpperCamelCase = len(self.token_ids ) UpperCamelCase = -1 # the index of the currently fulfilled step UpperCamelCase = False def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.fulfilled_idx += 1 UpperCamelCase = True if self.fulfilled_idx == (self.seqlen - 1): UpperCamelCase = True UpperCamelCase = completed else: # failed to make progress. 
UpperCamelCase = True self.reset() return stepped, completed, reset def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = False UpperCamelCase = 0 def __UpperCamelCase ( self ) -> int: """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = PhrasalConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.fulfilled_idx UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ , A_=True ) -> List[Any]: """simple docstring""" UpperCamelCase = max([len(A_ ) for one in nested_token_ids] ) UpperCamelCase = {} for token_ids in nested_token_ids: UpperCamelCase = root for tidx, token_id in enumerate(A_ ): if token_id not in level: UpperCamelCase = {} UpperCamelCase = level[token_id] if no_subsets and self.has_subsets(A_ , A_ ): raise ValueError( 'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is' F''' {nested_token_ids}.''' ) UpperCamelCase = root def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.trie for current_token in current_seq: UpperCamelCase = start[current_token] UpperCamelCase = list(start.keys() ) return next_tokens def __UpperCamelCase ( self , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.next_tokens(A_ ) return len(A_ ) == 0 def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = list(root.values() ) if len(A_ ) == 0: return 1 else: return sum([self.count_leaves(A_ ) for nn in next_nodes] ) def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.count_leaves(A_ ) return len(A_ ) != leaf_count class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> str: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCamelCase = DisjunctiveTrie(A_ ) UpperCamelCase = nested_token_ids UpperCamelCase = self.trie.max_height UpperCamelCase = [] UpperCamelCase = False def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.trie.next_tokens(self.current_seq ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.current_seq.append(A_ ) UpperCamelCase = True else: UpperCamelCase = True self.reset() 
UpperCamelCase = self.trie.reached_leaf(self.current_seq ) UpperCamelCase = completed return stepped, completed, reset def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = False UpperCamelCase = [] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" UpperCamelCase = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.current_seq UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ ) -> Tuple: """simple docstring""" UpperCamelCase = constraints # max # of steps required to fulfill a given constraint UpperCamelCase = max([c.seqlen for c in constraints] ) UpperCamelCase = len(A_ ) UpperCamelCase = False self.init_state() def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = None UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCamelCase = constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) else: UpperCamelCase = self.inprogress_constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Any: """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCamelCase , UpperCamelCase = self.add(A_ ) # the entire list of constraints are fulfilled if self.completed: break def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCamelCase , UpperCamelCase = False, False if self.completed: UpperCamelCase = True UpperCamelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) ) UpperCamelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. 
self.complete_constraints.append(self.inprogress_constraint ) UpperCamelCase = None if len(self.pending_constraints ) == 0: # we're done! UpperCamelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(A_ ): UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ ) if not stepped: raise Exception( '`constraint.update(token_id)` is not yielding incremental progress, ' 'even though `constraint.does_advance(token_id)` is true.' ) if complete: self.complete_constraints.append(A_ ) UpperCamelCase = None if not complete and stepped: UpperCamelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCamelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCamelCase = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __UpperCamelCase ( self , A_=True ) -> Tuple: """simple docstring""" UpperCamelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCamelCase = [ constraint.copy(stateful=A_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ ) UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
3
1
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class lowercase ( _SCREAMING_SNAKE_CASE ):
    def __init__(
        self ,
        A_ ,
        A_ = None ,
        A_ = None ,
        A_ = True ,
        A_ = None ,
        A_ = False ,
        A_ = None ,
        A_ = True ,
        A_ = "arrow" ,
        **A_ ,
    ) -> int:
        """simple docstring"""
        super().__init__(
            split=A_ ,
            features=A_ ,
            cache_dir=A_ ,
            keep_in_memory=A_ ,
            streaming=A_ ,
            **A_ ,
        )
        UpperCamelCase = load_from_cache_file
        UpperCamelCase = file_format
        UpperCamelCase = Spark(
            df=A_ ,
            features=A_ ,
            cache_dir=A_ ,
            working_dir=A_ ,
            **A_ ,
        )

    def __UpperCamelCase ( self ) -> Tuple:
        """simple docstring"""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        UpperCamelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=A_ ,
            file_format=self._file_format ,
        )
        return self.builder.as_dataset(split=self.split )
3
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): @register_to_config def __init__( self , A_ , A_ = None , A_ = None ) -> Any: """simple docstring""" super().__init__() UpperCamelCase = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCamelCase = torch.zeros(A_ , A_ ) else: UpperCamelCase = None UpperCamelCase = torch.nn.Parameter(A_ ) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : VQModel __lowercase : CLIPTextModel __lowercase : CLIPTokenizer __lowercase : TransformeraDModel __lowercase : LearnedClassifierFreeSamplingEmbeddings __lowercase : VQDiffusionScheduler def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]: """simple docstring""" super().__init__() self.register_modules( vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1 # get prompt text embeddings UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) UpperCamelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length] UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate text embeddings for each generation per prompt UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 ) else: UpperCamelCase = [''] * batch_size UpperCamelCase = text_input_ids.shape[-1] UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , ) UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase = negative_prompt_embeds.shape[1] UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 ) UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" if isinstance(A_ , A_ ): UpperCamelCase = 1 elif isinstance(A_ , A_ ): UpperCamelCase = len(A_ ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' ) UpperCamelCase = batch_size * num_images_per_prompt UpperCamelCase = guidance_scale > 1.0 UpperCamelCase = self._encode_prompt(A_ , A_ , A_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(A_ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCamelCase = (batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCamelCase = self.transformer.num_vector_embeds - 1 UpperCamelCase = torch.full(A_ , A_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( 'Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,' F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCamelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(A_ , device=self.device ) UpperCamelCase = self.scheduler.timesteps.to(self.device ) UpperCamelCase = latents for i, t in enumerate(self.progress_bar(A_ ) ): # expand the sample if we are doing classifier free guidance UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample if do_classifier_free_guidance: UpperCamelCase , UpperCamelCase = model_output.chunk(2 ) UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ ) UpperCamelCase = self.truncate(A_ , A_ ) # remove `log(0)`'s (`-inf`s) UpperCamelCase = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A_ , A_ , A_ ) UpperCamelCase = self.vqvae.config.vq_embed_dim UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ ) UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ ) def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor: """simple docstring""" UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ ) UpperCamelCase = torch.exp(A_ ) UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ ) UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 ) UpperCamelCase = keep_mask[:, :-1, :] UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) ) UpperCamelCase = log_p_x_0.clone() UpperCamelCase = -torch.inf # -inf = log(0) return rv
3
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    _UpperCAmelCase : List[str] = None

_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : List[str] = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}
_UpperCAmelCase : Optional[int] = {
    "camembert-base": 512,
}
_UpperCAmelCase : Union[str, Any] = "▁"


class lowercase ( _SCREAMING_SNAKE_CASE ):
    __lowercase : str = VOCAB_FILES_NAMES
    __lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    __lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowercase : List[str] = ["input_ids", "attention_mask"]
    __lowercase : Tuple = CamembertTokenizer

    def __init__(
        self ,
        A_=None ,
        A_=None ,
        A_="<s>" ,
        A_="</s>" ,
        A_="</s>" ,
        A_="<s>" ,
        A_="<unk>" ,
        A_="<pad>" ,
        A_="<mask>" ,
        A_=["<s>NOTUSED", "</s>NOTUSED"] ,
        **A_ ,
    ) -> List[Any]:
        """simple docstring"""
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
        super().__init__(
            A_ ,
            tokenizer_file=A_ ,
            bos_token=A_ ,
            eos_token=A_ ,
            sep_token=A_ ,
            cls_token=A_ ,
            unk_token=A_ ,
            pad_token=A_ ,
            mask_token=A_ ,
            additional_special_tokens=A_ ,
            **A_ ,
        )
        UpperCamelCase = vocab_file
        UpperCamelCase = False if not self.vocab_file else True

    def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
        """simple docstring"""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        UpperCamelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
        """simple docstring"""
        UpperCamelCase = [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(A_ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCamelCase = os.path.join(
            A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
            copyfile(self.vocab_file , A_ )
        return (out_vocab_file,)
3
from string import ascii_uppercase

_UpperCAmelCase : Dict = {char: i for i, char in enumerate(ascii_uppercase)}
_UpperCAmelCase : Tuple = dict(enumerate(ascii_uppercase))


def A ( lowercase , lowercase ) -> str:
    '''simple docstring'''
    UpperCamelCase = len(lowercase )
    UpperCamelCase = 0
    while True:
        if x == i:
            UpperCamelCase = 0
        if len(lowercase ) == len(lowercase ):
            break
        key += key[i]
        i += 1
    return key


def A ( lowercase , lowercase ) -> str:
    '''simple docstring'''
    UpperCamelCase = ''
    UpperCamelCase = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            UpperCamelCase = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dicta[x]
    return cipher_text


def A ( lowercase , lowercase ) -> str:
    '''simple docstring'''
    UpperCamelCase = ''
    UpperCamelCase = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            UpperCamelCase = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dicta[x]
    return or_txt


def A ( ) -> None:
    '''simple docstring'''
    UpperCamelCase = 'THE GERMAN ATTACK'
    UpperCamelCase = 'SECRET'
    UpperCamelCase = generate_key(lowercase , lowercase )
    UpperCamelCase = cipher_text(lowercase , lowercase )
    print(f'''Encrypted Text = {s}''' )
    print(f'''Original Text = {original_text(lowercase , lowercase )}''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
3
1
import os
from datetime import datetime as dt

from github import Github

_UpperCAmelCase : str = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def A ( ) -> List[Any]:
    '''simple docstring'''
    UpperCamelCase = Github(os.environ['GITHUB_TOKEN'] )
    UpperCamelCase = g.get_repo('huggingface/diffusers' )
    UpperCamelCase = repo.get_issues(state='open' )
    for issue in open_issues:
        UpperCamelCase = sorted(issue.get_comments() , key=lambda lowercase : i.created_at , reverse=lowercase )
        UpperCamelCase = comments[0] if len(lowercase ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state='closed' )
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state='open' )
            issue.remove_from_labels('stale' )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.' )
            issue.add_to_labels('stale' )


if __name__ == "__main__":
    main()
3
from collections.abc import Callable


def A ( lowercase , lowercase , lowercase ) -> float:
    '''simple docstring'''
    UpperCamelCase = a
    UpperCamelCase = b
    if function(lowercase ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(lowercase ) == 0:
        return b
    elif (
        function(lowercase ) * function(lowercase ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        UpperCamelCase = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(lowercase ) == 0:
                return mid
            elif function(lowercase ) * function(lowercase ) < 0:
                UpperCamelCase = mid
            else:
                UpperCamelCase = mid
            UpperCamelCase = start + (end - start) / 2.0
        return mid


def A ( lowercase ) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
3
1
import os

_UpperCAmelCase : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def A ( lowercase ) -> int:
    '''simple docstring'''
    UpperCamelCase = 0
    UpperCamelCase = 0
    while index < len(lowercase ) - 1:
        UpperCamelCase = SYMBOLS[numerals[index]]
        UpperCamelCase = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def A ( lowercase ) -> str:
    '''simple docstring'''
    UpperCamelCase = ''
    UpperCamelCase = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    UpperCamelCase = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    UpperCamelCase = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def A ( lowercase = "/p089_roman.txt" ) -> int:
    '''simple docstring'''
    UpperCamelCase = 0
    with open(os.path.dirname(lowercase ) + roman_numerals_filename ) as filea:
        UpperCamelCase = filea.readlines()
    for line in lines:
        UpperCamelCase = line.strip()
        UpperCamelCase = parse_roman_numerals(lowercase )
        UpperCamelCase = generate_roman_numerals(lowercase )
        savings += len(lowercase ) - len(lowercase )
    return savings


if __name__ == "__main__":
    print(F'''{solution() = }''')
3
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman-numeral string to its integer value."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman-numeral form of `num`."""
    numerals = ""
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
3
1
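# Hedged round-trip check for the Roman-numeral helpers above (restored names assumed):
# parsing handles subtractive pairs, and generation emits the minimal form.
assert parse_roman_numerals("XLIX") == 49
assert generate_roman_numerals(49) == "XLIX"
assert len("IIIIIIIII") - len(generate_roman_numerals(parse_roman_numerals("IIIIIIIII"))) == 7  # chars saved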
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with alternating forward/backward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):  # backward pass: sink the minimum
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):  # forward pass: float the maximum
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
3
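# Hedged check of cocktail_shaker_sort above: duplicates survive and order is ascending.
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]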
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
3
1
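# Hedged illustration of what the parametrized test above pins down: with the default
# IN_MEMORY_MAX_SIZE of 0, no size counts as "small", so in-memory loading stays opt-in.
from datasets.utils.info_utils import is_small_dataset
assert is_small_dataset(400 * 2**20) is False  # a max size of 0 disables the check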
def merge_sort(collection: list) -> list:
    """Sort by repeatedly pulling the min and max off the remaining items."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
3
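# Hedged check of the min/max selection sort above (kept under its original name
# `merge_sort`): repeated extremes land in `start`/`end` and stitch back together sorted.
assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]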
def binary_xor(a: int, b: int) -> str:
    """Return the binary XOR of two non-negative ints as a '0b...' string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
3
1
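# Hedged worked example for binary_xor above: 25 = 0b011001 and 32 = 0b100000, so the
# zero-padded strings differ in four positions and the result is 0b111001 (57).
assert binary_xor(25, 32) == "0b111001"
assert int(binary_xor(25, 32), 2) == 25 ^ 32 == 57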
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
3
import re


def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
3
1
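# Hedged check of the strand-complement helper above (restored name `dna` assumed):
# each base maps to its Watson-Crick partner, so complementing twice is the identity.
assert dna("GCTA") == "CGAT"
assert dna(dna("ATCGATCG")) == "ATCGATCG"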
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of a list, sorted ascending."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
3
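# Hedged check of the mode helper above: ties are all returned, sorted ascending.
assert mode([2, 2, 3, 3, 1]) == [2, 3]
assert mode([]) == []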
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Dict = (DDPMScheduler,) def __UpperCamelCase ( self , **A_ ) -> Dict: """simple docstring""" UpperCamelCase = { 'num_train_timesteps': 1_000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**A_ ) return config def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=A_ ) def __UpperCamelCase ( self ) -> str: """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=A_ , beta_end=A_ ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=A_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=A_ ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual UpperCamelCase = model(A_ , A_ ) # 2. 
predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(A_ ) ) UpperCamelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' ) UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual UpperCamelCase = model(A_ , A_ ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(A_ ) ) UpperCamelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=A_ ) UpperCamelCase = scheduler.timesteps for i, timestep in enumerate(A_ ): if i == len(A_ ) - 1: UpperCamelCase = -1 else: UpperCamelCase = timesteps[i + 1] UpperCamelCase = scheduler.previous_timestep(A_ ) UpperCamelCase = prev_t.item() self.assertEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(A_ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 1, 0] UpperCamelCase = len(A_ ) with self.assertRaises(A_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ ) def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( A_ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=A_ )
3
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5_000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
3
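# Hedged usage sketch for the restored RealmConfig above: the defaults mirror the
# google/realm-cc-news-pretrained-* checkpoints, and any field can be overridden.
config = RealmConfig(num_candidates=4)
assert config.model_type == "realm" and config.num_candidates == 4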
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: _UpperCAmelCase : List[str] = None _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _UpperCAmelCase : List[str] = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } _UpperCAmelCase : Optional[int] = { "camembert-base": 512, } _UpperCAmelCase : Union[str, Any] = "▁" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : str = VOCAB_FILES_NAMES __lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : List[str] = ["input_ids", "attention_mask"] __lowercase : Tuple = CamembertTokenizer def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , ) UpperCamelCase = vocab_file UpperCamelCase = False if not self.vocab_file else True def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
3
1
import warnings

warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
3
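# Hedged sketch of the API the deprecation above points to: accelerate's decorator
# retries the wrapped function, halving `batch_size` after each CUDA OOM; the body
# here is a placeholder, not part of the original file.
from accelerate import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    ...  # build dataloaders/model from `batch_size`; reruns with batch_size // 2 on OOM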
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
3
1
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class lowercase : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , use_stable_embedding=A_ , ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple: """simple docstring""" UpperCamelCase = OpenLlamaModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ ) UpperCamelCase = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]: """simple docstring""" UpperCamelCase = True UpperCamelCase = 
OpenLlamaModel(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , ) UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , ) UpperCamelCase = model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]: """simple docstring""" UpperCamelCase = OpenLlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]: """simple docstring""" UpperCamelCase = True UpperCamelCase = True UpperCamelCase = OpenLlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , ) UpperCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0] UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0] # select random slice UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : Optional[Any] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) __lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else () __lowercase : List[str] = ( { "feature-extraction": OpenLlamaModel, "text-classification": OpenLlamaForSequenceClassification, "text-generation": OpenLlamaForCausalLM, "zero-shot": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) __lowercase : Any = False __lowercase : Dict = False def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = OpenLlamaModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" 
self.config_tester.run_common_tests() def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(A_ ) UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase = OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = 'single_label_classification' UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(A_ ) UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase = OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = 'multi_label_classification' UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(A_ ) UpperCamelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase = OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" pass @parameterized.expand([('linear',), ('dynamic',)] ) def __UpperCamelCase ( self , A_ ) -> Tuple: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ids_tensor([1, 10] , config.vocab_size ) UpperCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCamelCase = OpenLlamaModel(A_ ) original_model.to(A_ ) original_model.eval() UpperCamelCase = original_model(A_ ).last_hidden_state UpperCamelCase = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCamelCase = {'type': scaling_type, 'factor': 10.0} UpperCamelCase = OpenLlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() UpperCamelCase = scaled_model(A_ ).last_hidden_state UpperCamelCase = 
scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ , A_ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
3
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
3
1
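# Hedged usage sketch for the restored Data2VecTextConfig above: fields override the
# RoBERTa-style defaults, and the ONNX config derives dynamic axes from the task.
config = Data2VecTextConfig(vocab_size=52_000)
assert config.use_cache and config.vocab_size == 52_000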
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def A ( lowercase ) -> Any: '''simple docstring''' if isinstance(lowercase , collections.abc.Iterable ): return x return (x, x) @require_flax class lowercase : def __UpperCamelCase ( self , A_ , A_ ) -> Optional[Any]: """simple docstring""" pass def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" pass def __UpperCamelCase ( self ) -> Dict: """simple docstring""" pass def __UpperCamelCase ( self , A_ , A_ , A_ ) -> int: """simple docstring""" UpperCamelCase = np.abs((a - b) ).max() self.assertLessEqual(A_ , A_ , F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_=None , **A_ ) -> List[Any]: """simple docstring""" UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(A_ , A_ ) UpperCamelCase = FlaxVisionTextDualEncoderModel(A_ ) UpperCamelCase = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_=None , **A_ ) -> Dict: """simple docstring""" UpperCamelCase , UpperCamelCase = self.get_vision_text_model(A_ , A_ ) UpperCamelCase = {'vision_model': vision_model, 'text_model': text_model} UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ ) UpperCamelCase = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_=None , **A_ ) -> List[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.get_vision_text_model(A_ , A_ ) UpperCamelCase = {'vision_model': vision_model, 'text_model': text_model} UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ ) UpperCamelCase = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ ) UpperCamelCase = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A_ ) UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(A_ ) UpperCamelCase = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ ) UpperCamelCase = after_output[0] UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(A_ , 1e-3 ) def __UpperCamelCase ( self , A_ , 
A_ , A_ , A_ , A_=None , **A_ ) -> Dict: """simple docstring""" UpperCamelCase , UpperCamelCase = self.get_vision_text_model(A_ , A_ ) UpperCamelCase = {'vision_model': vision_model, 'text_model': text_model} UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ ) UpperCamelCase = model( input_ids=A_ , pixel_values=A_ , attention_mask=A_ , output_attentions=A_ ) UpperCamelCase = output.vision_model_output.attentions self.assertEqual(len(A_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = to_atuple(vision_model.config.image_size ) UpperCamelCase = to_atuple(vision_model.config.patch_size ) UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCamelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) UpperCamelCase = output.text_model_output.attentions self.assertEqual(len(A_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> List[str]: """simple docstring""" pt_model.to(A_ ) pt_model.eval() # prepare inputs UpperCamelCase = inputs_dict UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): UpperCamelCase = pt_model(**A_ ).to_tuple() UpperCamelCase = fx_model(**A_ ).to_tuple() self.assertEqual(len(A_ ) , len(A_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(A_ , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(A_ ) UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(A_ , from_pt=A_ ) UpperCamelCase = fx_model_loaded(**A_ ).to_tuple() self.assertEqual(len(A_ ) , len(A_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(A_ , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(A_ ) UpperCamelCase = VisionTextDualEncoderModel.from_pretrained(A_ , from_flax=A_ ) pt_model_loaded.to(A_ ) pt_model_loaded.eval() with torch.no_grad(): UpperCamelCase = pt_model_loaded(**A_ ).to_tuple() self.assertEqual(len(A_ ) , len(A_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(A_ , pt_output_loaded.numpy() , 4e-2 ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> int: """simple docstring""" UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(A_ , A_ ) UpperCamelCase = VisionTextDualEncoderModel(A_ ) UpperCamelCase = FlaxVisionTextDualEncoderModel(A_ ) UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , A_ ) UpperCamelCase = fx_state self.check_pt_flax_equivalence(A_ , A_ , A_ ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> List[Any]: """simple docstring""" UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(A_ , A_ ) UpperCamelCase = VisionTextDualEncoderModel(A_ ) UpperCamelCase = FlaxVisionTextDualEncoderModel(A_ ) UpperCamelCase = load_flax_weights_in_pytorch_model(A_ , fx_model.params ) self.check_pt_flax_equivalence(A_ , A_ , A_ ) def __UpperCamelCase ( 
self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**A_ ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**A_ ) def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() self.check_save_load(**A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**A_ ) @is_pt_flax_cross_test def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase = config_inputs_dict.pop('vision_config' ) UpperCamelCase = config_inputs_dict.pop('text_config' ) UpperCamelCase = config_inputs_dict self.check_equivalence_pt_to_flax(A_ , A_ , A_ ) self.check_equivalence_flax_to_pt(A_ , A_ , A_ ) @slow def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.get_pretrained_model_and_inputs() UpperCamelCase = model_a(**A_ ) UpperCamelCase = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(A_ ) UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(A_ ) UpperCamelCase = model_a(**A_ ) UpperCamelCase = after_outputs[0] UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(A_ , 1e-5 ) @require_flax class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=A_ , text_from_pt=A_ , ) UpperCamelCase = 13 UpperCamelCase = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) UpperCamelCase = random_attention_mask([batch_size, 4] ) UpperCamelCase = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __UpperCamelCase ( self , A_ , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = FlaxViTModel(A_ ) UpperCamelCase = FlaxBertModel(A_ ) return vision_model, text_model def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = FlaxViTModelTester(self ) UpperCamelCase = FlaxBertModelTester(self ) UpperCamelCase = vit_model_tester.prepare_config_and_inputs() UpperCamelCase = bert_model_tester.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase = vision_config_and_inputs UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=A_ , text_from_pt=A_ , ) UpperCamelCase = 13 UpperCamelCase = floats_tensor( [ 
batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) UpperCamelCase = random_attention_mask([batch_size, 4] ) UpperCamelCase = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __UpperCamelCase ( self , A_ , A_ ) -> int: """simple docstring""" UpperCamelCase = FlaxCLIPVisionModel(A_ ) UpperCamelCase = FlaxBertModel(A_ ) return vision_model, text_model def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = FlaxCLIPVisionModelTester(self ) UpperCamelCase = FlaxBertModelTester(self ) UpperCamelCase = clip_model_tester.prepare_config_and_inputs() UpperCamelCase = bert_model_tester.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase = vision_config_and_inputs UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class lowercase ( unittest.TestCase ): @slow def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 ) UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) UpperCamelCase = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=A_ , padding=A_ , return_tensors='np' ) UpperCamelCase = model(**A_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) UpperCamelCase = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , A_ , atol=1e-3 ) )
3
from random import shuffle import tensorflow as tf from numpy import array def A ( lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = int(lowercase ) assert noofclusters < len(lowercase ) # Find out the dimensionality UpperCamelCase = len(vectors[0] ) # Will help select random centroids from among the available vectors UpperCamelCase = list(range(len(lowercase ) ) ) shuffle(lowercase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. UpperCamelCase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION UpperCamelCase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points UpperCamelCase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase ) ] ##These nodes will assign the centroid Variables the appropriate ##values UpperCamelCase = tf.placeholder('float64' , [dim] ) UpperCamelCase = [] for centroid in centroids: cent_assigns.append(tf.assign(lowercase , lowercase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) UpperCamelCase = [tf.Variable(0 ) for i in range(len(lowercase ) )] ##These nodes will assign an assignment Variable the appropriate ##value UpperCamelCase = tf.placeholder('int32' ) UpperCamelCase = [] for assignment in assignments: cluster_assigns.append(tf.assign(lowercase , lowercase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input UpperCamelCase = tf.placeholder('float' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors UpperCamelCase = tf.reduce_mean(lowercase , 0 ) ##Node for computing Euclidean distances # Placeholders for input UpperCamelCase = tf.placeholder('float' , [dim] ) UpperCamelCase = tf.placeholder('float' , [dim] ) UpperCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase , lowercase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input UpperCamelCase = tf.placeholder('float' , [noofclusters] ) UpperCamelCase = tf.argmin(lowercase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. UpperCamelCase = tf.initialize_all_variables() # Initialize all variables sess.run(lowercase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. UpperCamelCase = 100 for _ in range(lowercase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(lowercase ) ): UpperCamelCase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. 
UpperCamelCase = [ sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input UpperCamelCase = sess.run( lowercase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(lowercase ): # Collect all the vectors assigned to this cluster UpperCamelCase = [ vectors[i] for i in range(len(lowercase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location UpperCamelCase = sess.run( lowercase , feed_dict={mean_input: array(lowercase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments UpperCamelCase = sess.run(lowercase ) UpperCamelCase = sess.run(lowercase ) return centroids, assignments
3
1
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` recursively."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    """Return all permutations of `nums` by in-place backtracking."""

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
3
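# Hedged check of the two permutation routines above: both enumerate all 3! = 6
# orderings of a 3-element list, just in different orders.
assert len(permute([1, 2, 3])) == 6
assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))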
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _UpperCAmelCase : Tuple = _symbol_database.Default() _UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile( b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) _UpperCAmelCase : int = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _UpperCAmelCase : int = None _UpperCAmelCase : List[str] = b"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _UpperCAmelCase : Optional[Any] = 45 _UpperCAmelCase : Any = 1_581 _UpperCAmelCase : Tuple = 1_517 _UpperCAmelCase : List[str] = 1_570 _UpperCAmelCase : int = 1_584 _UpperCAmelCase : List[Any] = 1_793 _UpperCAmelCase : Optional[int] = 1_795 _UpperCAmelCase : Any = 1_916 _UpperCAmelCase : Tuple = 1_864 _UpperCAmelCase : List[Any] = 1_905 _UpperCAmelCase : Union[str, Any] = 1_919 _UpperCAmelCase : str = 2_429 _UpperCAmelCase : Any = 2_208 _UpperCAmelCase : Dict = 2_418 _UpperCAmelCase : Optional[Any] = 2_323 _UpperCAmelCase : Tuple = 2_407 # @@protoc_insertion_point(module_scope)
3
1
from . import (
    albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho,
    beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit,
    blenderbot, blenderbot_small, blip, blip_2, bloom, bridgetower, byt5, camembert, canine, chinese_clip,
    clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt,
    data2vec, deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, deta, detr,
    dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec,
    encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn,
    gpt2, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_sw3, gptj, gptsan_japanese, graphormer,
    groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmv2,
    layoutlmv3, layoutxlm, led, levit, lilt, llama, longformer, longt5, luke, lxmert, m2m_100, marian,
    markuplm, mask2former, maskformer, mbart, mbart50, mega, megatron_bert, megatron_gpt2, mgp_str, mluke,
    mobilebert, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, mpnet, mra, mt5, musicgen, mvp, nat,
    nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x,
    perceiver, phobert, pix2struct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet,
    rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d,
    speech_encoder_decoder, speech_to_text, speech_to_text_2, speecht5, splinter, squeezebert, swiftformer,
    swin, swin2sr, swinv2, switch_transformers, t5, table_transformer, tapas, time_series_transformer,
    timesformer, timm_backbone, transfo_xl, trocr, tvlt, umt5, unispeech, unispeech_sat, upernet, videomae,
    vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn,
    vivit, wav2vec2, wav2vec2_conformer, wav2vec2_phoneme, wav2vec2_with_lm, wavlm, whisper, x_clip, xglm,
    xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso,
)
3
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" # A mock response for an HTTP head request to emulate server down UpperCamelCase = mock.Mock() UpperCamelCase = 500 UpperCamelCase = {} UpperCamelCase = HTTPError UpperCamelCase = {} # Download this model to make sure it's in the cache. UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head: UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def __UpperCamelCase ( self ) -> Dict: """simple docstring""" # A mock response for an HTTP head request to emulate server down UpperCamelCase = mock.Mock() UpperCamelCase = 500 UpperCamelCase = {} UpperCamelCase = HTTPError UpperCamelCase = {} # Download this model to make sure it's in the cache. UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head: UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' ) # This check we did call the fake head request mock_head.assert_called() def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" # This test is for deprecated behavior and can be removed in v5 try: UpperCamelCase = tempfile.mktemp() with open(A_ , 'wb' ) as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ ) UpperCamelCase = AlbertTokenizer.from_pretrained(A_ ) finally: os.remove(A_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('tokenizer.json' , 'wb' ) as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ ) UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('tokenizer.json' ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" # This test is for deprecated behavior and can be removed in v5 UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ) @is_staging_test class lowercase ( unittest.TestCase ): __lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def __UpperCamelCase ( cls ) -> Tuple: """simple docstring""" UpperCamelCase = TOKEN HfFolder.save_token(A_ ) @classmethod def __UpperCamelCase ( cls ) -> Optional[int]: """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-tokenizer' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' ) except HTTPError: pass def __UpperCamelCase ( self ) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizer(A_ ) tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='test-tokenizer' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def __UpperCamelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizer(A_ ) tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def __UpperCamelCase ( self ) -> Dict: """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = CustomTokenizer(A_ ) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module 
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizerFast.from_pretrained(A_ ) bert_tokenizer.save_pretrained(A_ ) UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ ) tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' ) UpperCamelCase = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = Trie() trie.add('Hello 友達' ) self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} ) trie.add('Hello' ) trie.data self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] ) trie.add('[CLS]' ) trie.add('extra_id_1' ) trie.add('extra_id_100' ) self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = Trie() trie.add('A' ) self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] ) self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = Trie() trie.add('TOKEN]' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = Trie() trie.add('A' ) trie.add('P' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = Trie() trie.add('AB' ) trie.add('B' ) trie.add('C' ) self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = Trie() trie.add('ABC' ) trie.add('B' ) trie.add('CD' ) self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] ) def __UpperCamelCase ( self ) -> int: """simple docstring""" # Even if the offsets are wrong, we necessarily output correct string # parts. UpperCamelCase = Trie() UpperCamelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] ) self.assertEqual(A_ , ['AB', 'C'] )
3
1
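The Trie assertions above pin down two behaviours: `add` builds nested dicts terminated by `{'': 1}`, and `split` cuts a text around the stored tokens with longest-match-from-the-left semantics. A simplified sketch that reproduces the expected outputs above (greedy matching only; the real implementation resolves overlapping partial matches more carefully):

class SimpleTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # terminator, matching the {'': 1} leaves asserted above

    def split(self, text):
        out, i, last = [], 0, 0
        while i < len(text):
            node, j, end = self.data, i, -1
            # walk as far as the trie allows, remembering the longest full match
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if "" in node:
                    end = j
            if end == -1:
                i += 1  # no stored token starts here
            else:
                if last < i:
                    out.append(text[last:i])  # flush the unmatched prefix
                out.append(text[i:end])
                i = last = end
        if last < len(text):
            out.append(text[last:])
        return out

trie = SimpleTrie()
trie.add("AB"); trie.add("B"); trie.add("C")
assert trie.split("ABC") == ["AB", "C"]  # same result the test expects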
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _UpperCAmelCase : Optional[Any] = 16 _UpperCAmelCase : Dict = 32 def A ( lowercase , lowercase = 16 ) -> List[str]: '''simple docstring''' UpperCamelCase = AutoTokenizer.from_pretrained('bert-base-cased' ) UpperCamelCase = load_dataset('glue' , 'mrpc' ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCamelCase = datasets.map( lowercase , batched=lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCamelCase = 16 elif accelerator.mixed_precision != "no": UpperCamelCase = 8 else: UpperCamelCase = None return tokenizer.pad( lowercase , padding='longest' , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors='pt' , ) # Instantiate dataloaders. 
UpperCamelCase = DataLoader( tokenized_datasets['train'] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) UpperCamelCase = DataLoader( tokenized_datasets['validation'] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _UpperCAmelCase : Union[str, Any] = mocked_dataloaders # noqa: F811 def A ( lowercase , lowercase ) -> int: '''simple docstring''' if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowercase ) == "1": UpperCamelCase = 2 # Initialize accelerator UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase = config['lr'] UpperCamelCase = int(config['num_epochs'] ) UpperCamelCase = int(config['seed'] ) UpperCamelCase = int(config['batch_size'] ) UpperCamelCase = evaluate.load('glue' , 'mrpc' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCamelCase = model.to(accelerator.device ) # Instantiate optimizer UpperCamelCase = AdamW(params=model.parameters() , lr=lowercase ) UpperCamelCase , UpperCamelCase = get_dataloaders(lowercase , lowercase ) # Instantiate scheduler UpperCamelCase = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) UpperCamelCase = model(**lowercase ) UpperCamelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCamelCase = model(**lowercase ) UpperCamelCase = outputs.logits.argmax(dim=-1 ) UpperCamelCase , UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=lowercase , references=lowercase , ) UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def A ( ) -> Optional[int]: '''simple docstring''' UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=lowercase , default=lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) UpperCamelCase = parser.parse_args() UpperCamelCase = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
3
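The decorator in the example above retries the whole inner training loop with a smaller batch size whenever it hits an out-of-memory error. A rough sketch of that mechanism (simplified; the real `find_executable_batch_size` also frees CUDA caches and recognizes more failure modes):

import functools

def find_executable_batch_size_sketch(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(
            find_executable_batch_size_sketch, starting_batch_size=starting_batch_size
        )

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                # the decorated function receives the batch size as its first argument
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "out of memory" in str(e).lower():
                    batch_size //= 2  # halve and retry
                else:
                    raise

    return wrapper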
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = features.copy() if features else default_expected_features UpperCamelCase = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def A ( lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if issubclass(lowercase , lowercase ): UpperCamelCase = parquet_path elif issubclass(lowercase , lowercase ): UpperCamelCase = [parquet_path] UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple: '''simple docstring''' assert isinstance(lowercase , lowercase ) for split in splits: UpperCamelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( lowercase , 
lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase = ParquetDatasetReader( {'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = features.copy() if features else default_expected_features UpperCamelCase = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if split: UpperCamelCase = {split: parquet_path} else: UpperCamelCase = 'train' UpperCamelCase = {'train': parquet_path, 'test': parquet_path} UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def A ( lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' ) assert writer.write() > 0 UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' ) UpperCamelCase = pf.read() assert dataset.data.table == output_table def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' ) UpperCamelCase = {'image': [image_path]} UpperCamelCase = Features({'image': Image()} ) UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase ) UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' ) assert writer.write() > 0 UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) ) assert dataset.features == reloaded_dataset.features UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( 'feature, expected' , [ (Features({'foo': Value('int32' )} ), None), (Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def A ( lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' assert get_writer_batch_size(lowercase ) == expected
3
1
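The reader/writer tests above amount to the following round trip, written out as a usage sketch (file names are placeholders):

import pyarrow.parquet as pq
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
assert ParquetDatasetWriter(ds, "foo.parquet").write() > 0  # bytes written
reloaded = ParquetDatasetReader("foo.parquet").read()       # back to a Dataset
assert reloaded.column_names == ds.column_names
assert pq.ParquetFile("foo.parquet").read().num_rows == 2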
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : List[Any] = logging.get_logger(__name__) _UpperCAmelCase : List[str] = { "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : str = "yolos" def __init__( self , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1e-12 , A_=[512, 864] , A_=16 , A_=3 , A_=True , A_=100 , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=5 , A_=2 , A_=0.1 , **A_ , ) -> Dict: """simple docstring""" super().__init__(**A_ ) UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = qkv_bias UpperCamelCase = num_detection_tokens UpperCamelCase = use_mid_position_embeddings UpperCamelCase = auxiliary_loss # Hungarian matcher UpperCamelCase = class_cost UpperCamelCase = bbox_cost UpperCamelCase = giou_cost # Loss coefficients UpperCamelCase = bbox_loss_coefficient UpperCamelCase = giou_loss_coefficient UpperCamelCase = eos_coefficient class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Dict = version.parse("1.11" ) @property def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __UpperCamelCase ( self ) -> float: """simple docstring""" return 1e-4 @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return 12
3
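A short usage sketch for the configuration class above, shown under its public transformers name `YolosConfig`; the asserted values simply echo the defaults in the signature:

from transformers import YolosConfig

config = YolosConfig()  # defaults from the signature above
assert config.hidden_size == 768
assert config.image_size == [512, 864]

# overriding a couple of fields produces a smaller variant
small = YolosConfig(hidden_size=384, num_hidden_layers=6)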
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Tuple: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize UpperCamelCase = size if size is not None else {'height': 18, 'width': 20} UpperCamelCase = do_thumbnail UpperCamelCase = do_align_axis UpperCamelCase = do_pad UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : Optional[int] = DonutImageProcessor if is_vision_available() else None def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = DonutImageProcessingTester(self ) @property def __UpperCamelCase ( self ) -> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , 'do_resize' ) ) self.assertTrue(hasattr(A_ , 'size' ) ) self.assertTrue(hasattr(A_ , 'do_thumbnail' ) ) self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) ) self.assertTrue(hasattr(A_ , 'do_pad' ) ) self.assertTrue(hasattr(A_ , 'do_normalize' ) ) self.assertTrue(hasattr(A_ , 'image_mean' ) ) self.assertTrue(hasattr(A_ , 'image_std' ) ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def __UpperCamelCase ( self ) -> int: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 
1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def __UpperCamelCase ( self ) -> Any: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
3
1
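The size test above encodes a backward-compatibility rule worth spelling out: a tuple passed as `size` follows the legacy `(width, height)` order and is flipped when normalized to the dict form. In sketch form:

legacy_size = (42, 84)  # old convention: (width, height)
normalized = {"height": legacy_size[1], "width": legacy_size[0]}
assert normalized == {"height": 84, "width": 42}  # matches the assertion above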
import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase ) UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: UpperCamelCase = dataset_size < in_memory_max_size else: UpperCamelCase = False UpperCamelCase = is_small_dataset(lowercase ) assert result == expected
3
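The parametrized test above pins down the predicate's truth table; stripped of the config plumbing, it is essentially this (a sketch, not the library source):

def is_small_dataset_sketch(dataset_size, in_memory_max_size):
    # "small" only when a size is known, a positive cap is configured,
    # and the size fits under the cap; a cap of 0 disables in-memory loading
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False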
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Dict = logging.get_logger(__name__) _UpperCAmelCase : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } _UpperCAmelCase : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } _UpperCAmelCase : List[str] = {"facebook/blenderbot_small-90M": 512} def A ( lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char UpperCamelCase = set(lowercase ) return pairs class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Optional[Any] = VOCAB_FILES_NAMES __lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Any = ["input_ids", "attention_mask"] def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , ) -> List[Any]: """simple docstring""" super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ ) with open(A_ , encoding='utf-8' ) as vocab_handle: UpperCamelCase = json.load(A_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: UpperCamelCase = merges_handle.read().split('\n' )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in merges] UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) ) UpperCamelCase = {} @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return len(self.encoder ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = re.sub('([.,!?()])' , r' \1' , A_ ) UpperCamelCase = re.sub('(\')' , r' \1 ' , A_ ) UpperCamelCase = re.sub(r'\s{2,}' , ' ' , A_ ) if "\n" in token: UpperCamelCase = token.replace('\n' , ' __newln__' ) UpperCamelCase = token.split(' ' ) UpperCamelCase = [] for token in tokens: if not len(A_ ): continue UpperCamelCase = token.lower() UpperCamelCase = tuple(A_ ) UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) UpperCamelCase = get_pairs(A_ ) if not pairs: words.append(A_ ) continue while True: UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(A_ ): try: UpperCamelCase = word.index(A_ , A_ ) new_word.extend(word[i:j] ) UpperCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(A_ ) UpperCamelCase = new_word if len(A_ ) == 1: break else: UpperCamelCase = get_pairs(A_ ) UpperCamelCase = '@@ '.join(A_ ) UpperCamelCase = 
word[:-4] UpperCamelCase = word words.append(A_ ) return " ".join(A_ ) def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" split_tokens = [] words = re.findall(r'\S+\n?' , A_ ) for token in words: split_tokens.extend(list(self.bpe(token ).split(' ' ) ) ) return split_tokens def __UpperCamelCase ( self , token ) -> int: """simple docstring""" token = token.lower() return self.encoder.get(token , self.encoder.get(self.unk_token ) ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" return self.decoder.get(A_ , self.unk_token ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" out_string = ' '.join(A_ ).replace('@@ ' , '' ).strip() return out_string def __UpperCamelCase ( self , save_directory , filename_prefix = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(save_directory ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return vocab_file = os.path.join( save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) merge_file = os.path.join( save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(vocab_file , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' ) index = 0 with open(merge_file , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) index = token_index writer.write(' '.join(bpe_tokens ) + '\n' ) index += 1 return vocab_file, merge_file
3
1
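A tiny demonstration of the `get_pairs` helper defined at the top of the tokenizer above: it enumerates the adjacent symbol pairs that the BPE merge loop then ranks against `bpe_ranks`. An equivalent one-liner using `zip`:

def get_pairs_demo(word):
    # same result as the prev_char/char loop in the file above
    return set(zip(word, word[1:]))

assert get_pairs_demo(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}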
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def A ( lowercase ) -> str: '''simple docstring''' monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() ) @pytest.fixture def A ( lowercase ) -> Union[str, Any]: '''simple docstring''' class lowercase : def __init__( self , A_ ) -> List[Any]: """simple docstring""" UpperCamelCase = metric_id class lowercase : __lowercase : Any = [MetricMock(_SCREAMING_SNAKE_CASE ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" return self._metrics monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() ) @pytest.mark.parametrize( 'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] ) def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str: '''simple docstring''' if "tmp_path" in args: UpperCamelCase = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args ) with pytest.warns(lowercase , match='https://huggingface.co/docs/evaluate' ): func(*lowercase )
3
def binary_recursive(decimal: int) -> str:
    '''simple docstring'''
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    '''simple docstring'''
    number = str(number).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    if not number.isnumeric():
        raise ValueError('Input value is not an integer')
    return f'''{negative}0b{binary_recursive(int(number))}'''


if __name__ == "__main__":
    from doctest import testmod

    testmod()
3
1
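A worked example of the recursion above: converting 6 unrolls as divmod(6, 2) = (3, 0), then divmod(3, 2) = (1, 1), so the string builds up as '1' + '1' + '0'.

print(binary_recursive(6))  # 110
print(main("-6"))           # -0b110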
def perfect_cube(n: int) -> bool:
    '''simple docstring'''
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
3
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Tuple = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k", "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v", "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q", "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u", "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v", "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out", "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos", "self_attn.rotary_emb": "encoder.embed_positions", "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm", "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1", "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2", "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv", "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm", "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm", "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense", "ffn1.w_2": "encoder.layers.*.ffn1.output_dense", "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm", "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense", "ffn2.w_2": "encoder.layers.*.ffn2.output_dense", "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _UpperCAmelCase : Any = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' for attribute in key.split('.' ): UpperCamelCase = getattr(lowercase , lowercase ) if weight_type is not None: UpperCamelCase = getattr(lowercase , lowercase ).shape else: UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCamelCase = value elif weight_type == "weight_g": UpperCamelCase = value elif weight_type == "weight_v": UpperCamelCase = value elif weight_type == "bias": UpperCamelCase = value elif weight_type == "running_mean": UpperCamelCase = value elif weight_type == "running_var": UpperCamelCase = value elif weight_type == "num_batches_tracked": UpperCamelCase = value elif weight_type == "inv_freq": UpperCamelCase = value else: UpperCamelCase = value logger.info(f'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def A ( lowercase , lowercase , lowercase ) -> Any: '''simple docstring''' UpperCamelCase = [] UpperCamelCase = fairseq_model.state_dict() UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: UpperCamelCase = True if "*" in mapped_key: UpperCamelCase = name.split(lowercase )[0].split('.' )[-2] UpperCamelCase = mapped_key.replace('*' , lowercase ) if "pos_bias_u" in name: UpperCamelCase = None elif "pos_bias_v" in name: UpperCamelCase = None elif "weight_g" in name: UpperCamelCase = 'weight_g' elif "weight_v" in name: UpperCamelCase = 'weight_v' elif "bias" in name: UpperCamelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase = 'weight' elif "running_mean" in name: UpperCamelCase = 'running_mean' elif "inv_freq" in name: UpperCamelCase = 'inv_freq' elif "running_var" in name: UpperCamelCase = 'running_var' elif "num_batches_tracked" in name: UpperCamelCase = 'num_batches_tracked' else: UpperCamelCase = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = full_name.split('conv_layers.' )[-1] UpperCamelCase = name.split('.' 
) UpperCamelCase = int(items[0] ) UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase ) @torch.no_grad() def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int: '''simple docstring''' if config_path is not None: UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' ) else: UpperCamelCase = WavaVecaConformerConfig() if "rope" in checkpoint_path: UpperCamelCase = 'rotary' if is_finetuned: if dict_path: UpperCamelCase = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase = target_dict.pad_index UpperCamelCase = target_dict.bos_index UpperCamelCase = target_dict.eos_index UpperCamelCase = len(target_dict.symbols ) UpperCamelCase = os.path.join(lowercase , 'vocab.json' ) if not os.path.isdir(lowercase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase = 0 UpperCamelCase = 1 with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(lowercase , lowercase ) UpperCamelCase = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , ) UpperCamelCase = True if config.feat_extract_norm == 'layer' else False UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) UpperCamelCase = WavaVecaConformerForCTC(lowercase ) else: UpperCamelCase = 
WavaVecaConformerForPreTraining(lowercase ) if is_finetuned: UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: UpperCamelCase = argparse.Namespace(task='audio_pretraining' ) UpperCamelCase = fairseq.tasks.setup_task(lowercase ) UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase ) UpperCamelCase = model[0].eval() recursively_load_weights(lowercase , lowercase , not is_finetuned ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCAmelCase : Dict = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
3
1
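One non-obvious step in the weight-loading logic above is how the '*' wildcard in MAPPING gets filled in: the layer index is recovered by splitting the fairseq parameter name around the matched key. Concretely:

name = "encoder.layers.3.self_attn.linear_k.weight"
key = "self_attn.linear_k"
layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
mapped = "encoder.layers.*.self_attn.linear_k".replace("*", layer_index)
assert mapped == "encoder.layers.3.self_attn.linear_k"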
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) _UpperCAmelCase : str = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Optional[Any] = "unispeech-sat" def __init__( self , A_=32 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.1 , A_=0.1 , A_=0.02 , A_=1e-5 , A_="group" , A_="gelu" , A_=(512, 512, 512, 512, 512, 512, 512) , A_=(5, 2, 2, 2, 2, 2, 2) , A_=(10, 3, 3, 3, 3, 2, 2) , A_=False , A_=128 , A_=16 , A_=False , A_=True , A_=0.05 , A_=10 , A_=2 , A_=0.0 , A_=10 , A_=0 , A_=320 , A_=2 , A_=0.1 , A_=100 , A_=256 , A_=256 , A_=0.1 , A_="mean" , A_=False , A_=False , A_=256 , A_=(512, 512, 512, 512, 1_500) , A_=(5, 3, 3, 1, 1) , A_=(1, 2, 3, 1, 1) , A_=512 , A_=0 , A_=1 , A_=2 , A_=504 , **A_ , ) -> int: """simple docstring""" super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ ) UpperCamelCase = hidden_size UpperCamelCase = feat_extract_norm UpperCamelCase = feat_extract_activation UpperCamelCase = list(A_ ) UpperCamelCase = list(A_ ) UpperCamelCase = list(A_ ) UpperCamelCase = conv_bias UpperCamelCase = num_conv_pos_embeddings UpperCamelCase = num_conv_pos_embedding_groups UpperCamelCase = len(self.conv_dim ) UpperCamelCase = num_hidden_layers UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = num_attention_heads UpperCamelCase = hidden_dropout UpperCamelCase = attention_dropout UpperCamelCase = activation_dropout UpperCamelCase = feat_proj_dropout UpperCamelCase = final_dropout UpperCamelCase = layerdrop UpperCamelCase = layer_norm_eps UpperCamelCase = initializer_range UpperCamelCase = vocab_size UpperCamelCase = num_clusters UpperCamelCase = do_stable_layer_norm UpperCamelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCamelCase = apply_spec_augment UpperCamelCase = mask_time_prob UpperCamelCase = mask_time_length UpperCamelCase = mask_time_min_masks UpperCamelCase = mask_feature_prob UpperCamelCase = mask_feature_length UpperCamelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations UpperCamelCase = num_codevectors_per_group UpperCamelCase = num_codevector_groups UpperCamelCase = contrastive_logits_temperature UpperCamelCase = feat_quantizer_dropout UpperCamelCase = num_negatives UpperCamelCase = codevector_dim UpperCamelCase = proj_codevector_dim UpperCamelCase = diversity_loss_weight # ctc loss UpperCamelCase = ctc_loss_reduction UpperCamelCase = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. 
UpperCamelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCamelCase = list(A_ ) UpperCamelCase = list(A_ ) UpperCamelCase = list(A_ ) UpperCamelCase = xvector_output_dim @property def __UpperCamelCase ( self ) -> Dict: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
3
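The reduce at the end of the config above computes the total downsampling factor of the convolutional feature extractor; with the default strides it works out to 320 input samples per output frame:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from the signature above
assert functools.reduce(operator.mul, conv_stride, 1) == 320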
from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets _UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n" _UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n" _UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def A ( lowercase , lowercase ) -> List[str]: '''simple docstring''' return float((preds == labels).mean() ) def A ( lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = simple_accuracy(lowercase , lowercase ) UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) ) return { "accuracy": acc, "f1": fa, } def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] ) UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' 
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , ) def __UpperCamelCase ( self , A_ , A_ ) -> Any: """simple docstring""" if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(A_ , A_ )} elif self.config_name == "stsb": return pearson_and_spearman(A_ , A_ ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(A_ , A_ ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(A_ , A_ )} else: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
3
1
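A minimal, self-contained sketch of the metric arithmetic used in the GLUE snippet above, computed directly with scipy/scikit-learn rather than through datasets.load_metric; the toy predictions and references below are invented for illustration.

import numpy as np
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

preds = np.array([0, 1, 1, 0])    # toy classification predictions
labels = np.array([0, 1, 0, 0])   # toy references

accuracy = float((preds == labels).mean())           # sst2/mnli/qnli/rte/wnli subsets
f1 = float(f1_score(y_true=labels, y_pred=preds))    # added for mrpc/qqp
mcc = float(matthews_corrcoef(labels, preds))        # cola

scores = np.array([0.1, 0.4, 0.35, 0.8])             # toy stsb-style regression outputs
refs = np.array([0.0, 0.5, 0.3, 0.9])
pearson = float(pearsonr(refs, scores)[0])           # stsb reports both correlations
spearman = float(spearmanr(refs, scores)[0])

print({"accuracy": accuracy, "f1": f1, "matthews_correlation": mcc,
       "pearson": round(pearson, 3), "spearmanr": round(spearman, 3)})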
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        # read the image as grayscale and keep a copy for display
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # note: last % last is always 0, so self.rem stays 0 afterwards
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        # remap every pixel through the computed lookup list
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
3
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _UpperCAmelCase : str = "scheduler_config.json" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Tuple = 1 __lowercase : int = 2 __lowercase : List[Any] = 3 __lowercase : str = 4 __lowercase : Optional[Any] = 5 @dataclass class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : jnp.ndarray class lowercase : __lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME __lowercase : Dict = ["dtype"] __lowercase : List[Any] = [] __lowercase : Dict = True @classmethod def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = cls.load_config( pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , ) UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ ) if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ): UpperCamelCase = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str: """simple docstring""" self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ ) @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return self._get_compatibles() @classmethod def __UpperCamelCase ( cls ) -> int: """simple docstring""" UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) ) UpperCamelCase = importlib.import_module(__name__.split('.' )[0] ) UpperCamelCase = [ getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ ) ] return compatible_classes def A ( lowercase , lowercase ) -> jnp.ndarray: '''simple docstring''' assert len(lowercase ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase ) def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray: '''simple docstring''' def alpha_bar(lowercase ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 UpperCamelCase = [] for i in range(lowercase ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) ) return jnp.array(lowercase , dtype=lowercase ) @flax.struct.dataclass class lowercase : __lowercase : jnp.ndarray __lowercase : jnp.ndarray __lowercase : jnp.ndarray @classmethod def __UpperCamelCase ( cls , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = scheduler.config if config.trained_betas is not None: UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
UpperCamelCase = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) UpperCamelCase = 1.0 - betas UpperCamelCase = jnp.cumprod(A_ , axis=0 ) return cls( alphas=A_ , betas=A_ , alphas_cumprod=A_ , ) def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = state.alphas_cumprod UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def A ( lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
3
1
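A hedged sketch of the beta-schedule math in the scheduler-state snippet above, rewritten with plain numpy instead of jax so it runs anywhere; the cosine formula mirrors the alpha_bar construction shown there, and the sample beta_start/beta_end values are illustrative.

import math

import numpy as np

def betas_for_alpha_bar(num_steps: int, max_beta: float = 0.999) -> np.ndarray:
    # "squaredcos_cap_v2" (Glide cosine) schedule
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1 = i / num_steps
        t2 = (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas)

def scaled_linear_betas(beta_start: float, beta_end: float, num_steps: int) -> np.ndarray:
    # "scaled_linear": linear in sqrt(beta) space, then squared
    return np.linspace(beta_start**0.5, beta_end**0.5, num_steps) ** 2

betas = scaled_linear_betas(0.00085, 0.012, 1000)
alphas_cumprod = np.cumprod(1.0 - betas)  # the quantity used to noise samples
print(betas[:3], alphas_cumprod[-1])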
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size  # remembered for upsampling in postprocess
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # resize the raw depth map back to the input resolution
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
3
from abc import ABC, abstractmethod from typing import List, Optional class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self ) -> Optional[Any]: """simple docstring""" # test for the above condition self.test() def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = 0 UpperCamelCase = False while not completed: if counter == 1: self.reset() UpperCamelCase = self.advance() if not self.does_advance(A_ ): raise Exception( 'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' ) UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ ) counter += 1 if counter > 10_000: raise Exception('update() does not fulfill the constraint.' ) if self.remaining() != 0: raise Exception('Custom Constraint is not defined correctly.' ) @abstractmethod def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> Any: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> Any: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCamelCase = token_ids UpperCamelCase = len(self.token_ids ) UpperCamelCase = -1 # the index of the currently fulfilled step UpperCamelCase = False def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.fulfilled_idx += 1 UpperCamelCase = True if self.fulfilled_idx == (self.seqlen - 1): UpperCamelCase = True UpperCamelCase = completed else: # failed to make progress. 
UpperCamelCase = True self.reset() return stepped, completed, reset def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = False UpperCamelCase = 0 def __UpperCamelCase ( self ) -> int: """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = PhrasalConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.fulfilled_idx UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ , A_=True ) -> List[Any]: """simple docstring""" UpperCamelCase = max([len(A_ ) for one in nested_token_ids] ) UpperCamelCase = {} for token_ids in nested_token_ids: UpperCamelCase = root for tidx, token_id in enumerate(A_ ): if token_id not in level: UpperCamelCase = {} UpperCamelCase = level[token_id] if no_subsets and self.has_subsets(A_ , A_ ): raise ValueError( 'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is' F''' {nested_token_ids}.''' ) UpperCamelCase = root def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.trie for current_token in current_seq: UpperCamelCase = start[current_token] UpperCamelCase = list(start.keys() ) return next_tokens def __UpperCamelCase ( self , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.next_tokens(A_ ) return len(A_ ) == 0 def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = list(root.values() ) if len(A_ ) == 0: return 1 else: return sum([self.count_leaves(A_ ) for nn in next_nodes] ) def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.count_leaves(A_ ) return len(A_ ) != leaf_count class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> str: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCamelCase = DisjunctiveTrie(A_ ) UpperCamelCase = nested_token_ids UpperCamelCase = self.trie.max_height UpperCamelCase = [] UpperCamelCase = False def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.trie.next_tokens(self.current_seq ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.current_seq.append(A_ ) UpperCamelCase = True else: UpperCamelCase = True self.reset() 
UpperCamelCase = self.trie.reached_leaf(self.current_seq ) UpperCamelCase = completed return stepped, completed, reset def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = False UpperCamelCase = [] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" UpperCamelCase = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.current_seq UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ ) -> Tuple: """simple docstring""" UpperCamelCase = constraints # max # of steps required to fulfill a given constraint UpperCamelCase = max([c.seqlen for c in constraints] ) UpperCamelCase = len(A_ ) UpperCamelCase = False self.init_state() def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = None UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCamelCase = constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) else: UpperCamelCase = self.inprogress_constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Any: """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCamelCase , UpperCamelCase = self.add(A_ ) # the entire list of constraints are fulfilled if self.completed: break def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCamelCase , UpperCamelCase = False, False if self.completed: UpperCamelCase = True UpperCamelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) ) UpperCamelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. 
self.complete_constraints.append(self.inprogress_constraint ) UpperCamelCase = None if len(self.pending_constraints ) == 0: # we're done! UpperCamelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(A_ ): UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ ) if not stepped: raise Exception( '`constraint.update(token_id)` is not yielding incremental progress, ' 'even though `constraint.does_advance(token_id)` is true.' ) if complete: self.complete_constraints.append(A_ ) UpperCamelCase = None if not complete and stepped: UpperCamelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCamelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCamelCase = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __UpperCamelCase ( self , A_=True ) -> Tuple: """simple docstring""" UpperCamelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCamelCase = [ constraint.copy(stateful=A_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ ) UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
3
1
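A hedged usage sketch for a depth-estimation pipeline like the one above; the checkpoint id "Intel/dpt-large" and the image URL are assumptions, and any depth-estimation checkpoint could be substituted. Requires transformers, torch and Pillow plus network access.

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# result["predicted_depth"] is the raw tensor, result["depth"] a PIL image
result["depth"].save("depth.png")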
class lowercase : def __init__( self , A_ , A_ , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = None UpperCamelCase = None UpperCamelCase = graph self._normalize_graph(A_ , A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = None def __UpperCamelCase ( self , A_ , A_ ) -> str: """simple docstring""" if sources is int: UpperCamelCase = [sources] if sinks is int: UpperCamelCase = [sinks] if len(A_ ) == 0 or len(A_ ) == 0: return UpperCamelCase = sources[0] UpperCamelCase = sinks[0] # make fake vertex if there are more # than one source or sink if len(A_ ) > 1 or len(A_ ) > 1: UpperCamelCase = 0 for i in sources: max_input_flow += sum(self.graph[i] ) UpperCamelCase = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: UpperCamelCase = max_input_flow UpperCamelCase = 0 UpperCamelCase = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: UpperCamelCase = max_input_flow UpperCamelCase = size - 1 def __UpperCamelCase ( self ) -> Dict: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('You need to set maximum flow algorithm before.' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCamelCase ( self , A_ ) -> List[Any]: """simple docstring""" UpperCamelCase = algorithm(self ) class lowercase : def __init__( self , A_ ) -> List[Any]: """simple docstring""" UpperCamelCase = flow_network UpperCamelCase = flow_network.verticesCount UpperCamelCase = flow_network.sourceIndex UpperCamelCase = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that UpperCamelCase = flow_network.graph UpperCamelCase = False def __UpperCamelCase ( self ) -> Dict: """simple docstring""" if not self.executed: self._algorithm() UpperCamelCase = True def __UpperCamelCase ( self ) -> Dict: """simple docstring""" pass class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> List[Any]: """simple docstring""" super().__init__(A_ ) # use this to save your result UpperCamelCase = -1 def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: raise Exception('You should execute algorithm before using its result!' 
) return self.maximum_flow class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> List[str]: """simple docstring""" super().__init__(A_ ) UpperCamelCase = [[0] * self.verticies_count for i in range(self.verticies_count )] UpperCamelCase = [0] * self.verticies_count UpperCamelCase = [0] * self.verticies_count def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule UpperCamelCase = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list UpperCamelCase = 0 while i < len(A_ ): UpperCamelCase = vertices_list[i] UpperCamelCase = self.heights[vertex_index] self.process_vertex(A_ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(A_ ) ) UpperCamelCase = 0 else: i += 1 UpperCamelCase = sum(self.preflow[self.source_index] ) def __UpperCamelCase ( self , A_ ) -> List[Any]: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(A_ , A_ ) self.relabel(A_ ) def __UpperCamelCase ( self , A_ , A_ ) -> Dict: """simple docstring""" UpperCamelCase = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCamelCase ( self , A_ ) -> Tuple: """simple docstring""" UpperCamelCase = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): UpperCamelCase = self.heights[to_index] if min_height is not None: UpperCamelCase = min_height + 1 if __name__ == "__main__": _UpperCAmelCase : str = [0] _UpperCAmelCase : List[str] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] _UpperCAmelCase : Optional[Any] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network _UpperCAmelCase : Any = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate _UpperCAmelCase : str = flow_network.find_maximum_flow() print(F'''maximum flow is {maximum_flow}''')
3
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): @register_to_config def __init__( self , A_ , A_ = None , A_ = None ) -> Any: """simple docstring""" super().__init__() UpperCamelCase = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCamelCase = torch.zeros(A_ , A_ ) else: UpperCamelCase = None UpperCamelCase = torch.nn.Parameter(A_ ) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : VQModel __lowercase : CLIPTextModel __lowercase : CLIPTokenizer __lowercase : TransformeraDModel __lowercase : LearnedClassifierFreeSamplingEmbeddings __lowercase : VQDiffusionScheduler def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]: """simple docstring""" super().__init__() self.register_modules( vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1 # get prompt text embeddings UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) UpperCamelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length] UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate text embeddings for each generation per prompt UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 ) else: UpperCamelCase = [''] * batch_size UpperCamelCase = text_input_ids.shape[-1] UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , ) UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase = negative_prompt_embeds.shape[1] UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 ) UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" if isinstance(A_ , A_ ): UpperCamelCase = 1 elif isinstance(A_ , A_ ): UpperCamelCase = len(A_ ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' ) UpperCamelCase = batch_size * num_images_per_prompt UpperCamelCase = guidance_scale > 1.0 UpperCamelCase = self._encode_prompt(A_ , A_ , A_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(A_ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCamelCase = (batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCamelCase = self.transformer.num_vector_embeds - 1 UpperCamelCase = torch.full(A_ , A_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( 'Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,' F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCamelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(A_ , device=self.device ) UpperCamelCase = self.scheduler.timesteps.to(self.device ) UpperCamelCase = latents for i, t in enumerate(self.progress_bar(A_ ) ): # expand the sample if we are doing classifier free guidance UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample if do_classifier_free_guidance: UpperCamelCase , UpperCamelCase = model_output.chunk(2 ) UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ ) UpperCamelCase = self.truncate(A_ , A_ ) # remove `log(0)`'s (`-inf`s) UpperCamelCase = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A_ , A_ , A_ ) UpperCamelCase = self.vqvae.config.vq_embed_dim UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ ) UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ ) def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor: """simple docstring""" UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ ) UpperCamelCase = torch.exp(A_ ) UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ ) UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 ) UpperCamelCase = keep_mask[:, :-1, :] UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) ) UpperCamelCase = log_p_x_0.clone() UpperCamelCase = -torch.inf # -inf = log(0) return rv
3
1
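An independent cross-check for the push-relabel max-flow example above: a compact Edmonds-Karp implementation run on the same 4-node capacity matrix. This is a sketch of an alternative algorithm, not part of the file above; both should agree on the maximum flow value (6 for this graph, via the single path 0 -> 1 -> 2 -> 3).

from collections import deque

def edmonds_karp(capacity: list, source: int, sink: int) -> int:
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for an augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return max_flow
        # find the bottleneck, then push flow along the path
        bottleneck, v = float("inf"), sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += int(bottleneck)

graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
print(edmonds_karp(graph, 0, 3))  # expected: 6, matching the push-relabel run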
import os import sys import unittest _UpperCAmelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _UpperCAmelCase : str = os.path.join(git_repo_path, "src", "transformers") _UpperCAmelCase : Union[str, Any] = "\n{0} = None\n" _UpperCAmelCase : List[Any] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n" _UpperCAmelCase : Optional[int] = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' ) self.assertIsNone(A_ ) UpperCamelCase = find_backend(' if not is_tokenizers_available():' ) self.assertEqual(A_ , 'tokenizers' ) UpperCamelCase = find_backend(' if not is_tensorflow_text_available():' ) self.assertEqual(A_ , 'tensorflow_text' ) UpperCamelCase = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' ) self.assertEqual(A_ , 'sentencepiece_and_tokenizers' ) UpperCamelCase = find_backend( ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' ) self.assertEqual(A_ , 'sentencepiece_and_tensorflow_text' ) UpperCamelCase = find_backend( ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' ) self.assertEqual(A_ , 'sentencepiece_and_tokenizers_and_vision' ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , A_ ) self.assertIn('tensorflow_text' , A_ ) self.assertIn('sentencepiece_and_tokenizers' , A_ ) # Likewise, we can't assert on the exact content of a key self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertModel' , objects['tf'] ) self.assertIn('FlaxBertModel' , objects['flax'] ) self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] ) self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(A_ , '\nCONSTANT = None\n' ) UpperCamelCase = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( A_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) UpperCamelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n' UpperCamelCase = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n' UpperCamelCase = 
create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , A_ )
3
from string import ascii_uppercase

# letter <-> index lookup tables for A..Z
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    # repeat the key until it matches the message length
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher += dict2[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
3
1
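A quick round-trip check of the keyed-cipher logic in the file above, inlined into a single helper so the snippet runs on its own (the shift/unshift pair mirrors the subtract-on-encrypt, add-on-decrypt rule used there).

from string import ascii_uppercase

CHAR_TO_INDEX = {c: i for i, c in enumerate(ascii_uppercase)}
INDEX_TO_CHAR = dict(enumerate(ascii_uppercase))

def shift(message: str, key: str, sign: int) -> str:
    # sign=-1 encrypts (subtract key letter), sign=+1 decrypts (add it back)
    out, i = "", 0
    for ch in message:
        if ch == " ":
            out += " "
            continue
        out += INDEX_TO_CHAR[(CHAR_TO_INDEX[ch] + sign * CHAR_TO_INDEX[key[i]]) % 26]
        i += 1
    return out

message = "THE GERMAN ATTACK"
key = ("SECRET" * 3)[: len(message)]  # key stretched to the message length
encrypted = shift(message, key, -1)
assert shift(encrypted, key, +1) == message
print(encrypted)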
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : str = StableDiffusionXLImgaImgPipeline __lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} __lowercase : Any = PipelineTesterMixin.required_optional_params - {"latents"} __lowercase : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowercase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowercase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=A_ , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) UpperCamelCase = EulerDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , ) torch.manual_seed(0 ) UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=32 , ) UpperCamelCase = CLIPTextModel(A_ ) UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=A_ ) UpperCamelCase = CLIPTextModelWithProjection(A_ ) UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=A_ ) UpperCamelCase = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def __UpperCamelCase ( self , A_ , A_=0 ) -> Optional[Any]: """simple docstring""" UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase = image / 2 + 0.5 if str(A_ ).startswith('mps' ): UpperCamelCase = torch.manual_seed(A_ ) else: UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 
'strength': 0.75, } return inputs def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase = self.get_dummy_components() UpperCamelCase = StableDiffusionXLImgaImgPipeline(**A_ ) UpperCamelCase = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = self.get_dummy_inputs(A_ ) UpperCamelCase = sd_pipe(**A_ ).images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCamelCase = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" pass def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = StableDiffusionXLImgaImgPipeline(**A_ ) UpperCamelCase = sd_pipe.to(A_ ) UpperCamelCase = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) # forward without prompt embeds UpperCamelCase = self.get_dummy_inputs(A_ ) UpperCamelCase = 3 * ['this is a negative prompt'] UpperCamelCase = negative_prompt UpperCamelCase = 3 * [inputs['prompt']] UpperCamelCase = sd_pipe(**A_ ) UpperCamelCase = output.images[0, -3:, -3:, -1] # forward with prompt embeds UpperCamelCase = self.get_dummy_inputs(A_ ) UpperCamelCase = 3 * ['this is a negative prompt'] UpperCamelCase = 3 * [inputs.pop('prompt' )] ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = sd_pipe.encode_prompt(A_ , negative_prompt=A_ ) UpperCamelCase = sd_pipe( **A_ , prompt_embeds=A_ , negative_prompt_embeds=A_ , pooled_prompt_embeds=A_ , negative_pooled_prompt_embeds=A_ , ) UpperCamelCase = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self , A_ , A_="cpu" , A_=torch.floataa , A_=0 ) -> Any: """simple docstring""" UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase = np.random.RandomState(A_ ).standard_normal((1, 4, 64, 64) ) UpperCamelCase = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ ) UpperCamelCase = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = self.get_inputs(A_ ) UpperCamelCase = pipe(**A_ ).images UpperCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) UpperCamelCase = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
3
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is already a root
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither endpoint is a root and both values share a sign,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # iterate until within 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
3
1
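A tiny standalone usage sketch for the bisection routine above: locating sqrt(2) as the root of x**2 - 2 on [1, 2], with the same 1e-7-style stopping rule (named find_root here to avoid shadowing the stdlib bisect module).

def find_root(f, lo: float, hi: float, tol: float = 1e-7) -> float:
    assert f(lo) * f(hi) <= 0, "root must be bracketed"
    while hi - lo > tol:
        mid = (lo + hi) / 2
        if f(lo) * f(mid) <= 0:
            hi = mid  # root lies in the lower half
        else:
            lo = mid  # root lies in the upper half
    return (lo + hi) / 2

print(find_root(lambda x: x * x - 2, 1.0, 2.0))  # ~1.41421356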
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def A ( lowercase , lowercase ) -> Dict: '''simple docstring''' UpperCamelCase = [] for part_id in partition_order: UpperCamelCase = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(lowercase ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def A ( ) -> List[Any]: '''simple docstring''' UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() UpperCamelCase = spark.range(100 ).repartition(1 ) UpperCamelCase = Spark(lowercase ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def A ( ) -> Dict: '''simple docstring''' UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() UpperCamelCase = spark.range(10 ).repartition(2 ) UpperCamelCase = [1, 0] UpperCamelCase = _generate_iterable_examples(lowercase , lowercase ) # Reverse the partitions. UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , lowercase ) for i, (row_id, row_dict) in enumerate(generate_fn() ): UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def A ( ) -> List[Any]: '''simple docstring''' UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() UpperCamelCase = spark.range(10 ).repartition(1 ) UpperCamelCase = SparkExamplesIterable(lowercase ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(lowercase ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def A ( ) -> str: '''simple docstring''' UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() UpperCamelCase = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch('numpy.random.Generator' ) as generator_mock: UpperCamelCase = lambda lowercase : x.reverse() UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [2, 1, 0] ) UpperCamelCase = SparkExamplesIterable(lowercase ).shuffle_data_sources(lowercase ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(lowercase ): UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def A ( ) -> str: '''simple docstring''' UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() UpperCamelCase = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 UpperCamelCase = SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [0, 2] ) for i, (row_id, row_dict) in enumerate(lowercase ): UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 UpperCamelCase = SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [1, 3] ) for i, (row_id, row_dict) in enumerate(lowercase ): UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def A ( ) -> List[Any]: '''simple docstring''' UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() UpperCamelCase = spark.range(100 ).repartition(1 ) UpperCamelCase = Spark(lowercase ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
3
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        # subtractive notation: a smaller symbol before a larger one is negative
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
3
1
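A worked example of the saving that the Project Euler 89 file above measures, with a compressed standalone parser so the snippet runs on its own: the verbose-but-valid numeral "XIIII" and the minimal "XIV" both denote 14, so rewriting saves two characters.

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def parse(numerals: str) -> int:
    # pad with "I" so the last symbol always compares against something smaller-or-equal
    total = 0
    for cur, nxt in zip(numerals, numerals[1:] + "I"):
        total += SYMBOLS[cur] if SYMBOLS[cur] >= SYMBOLS[nxt] else -SYMBOLS[cur]
    return total

assert parse("XIIII") == 14 and parse("XIV") == 14
print(len("XIIII") - len("XIV"))  # characters saved: 2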
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    # Member names restored from the diffusers scheduling_utils module this
    # file mirrors; the obfuscated source kept only the integer values.
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
3
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # a dataset is "small" only when both sizes are known/non-zero and it fits
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
3
1
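Editor's note: the test row above pins down the semantics of `is_small_dataset`. As a hedged reconstruction from the assertions alone (not the library source), the predicate behaves like:

def is_small_dataset_sketch(dataset_size, in_memory_max_size):
    # Falsy on either side (None or 0) means "not small"; otherwise a strict size comparison.
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)

assert is_small_dataset_sketch(400 * 2**20, 900 * 2**20) is True
assert is_small_dataset_sketch(600 * 2**20, 0) is False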
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging _UpperCAmelCase : Dict = logging.get_logger(__name__) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Union[str, Any] = "linear" __lowercase : Tuple = "cosine" __lowercase : Tuple = "cosine_with_restarts" __lowercase : int = "polynomial" __lowercase : Optional[int] = "constant" __lowercase : Optional[Any] = "constant_with_warmup" __lowercase : List[Any] = "piecewise_constant" def A ( lowercase , lowercase = -1 ) -> Optional[Any]: '''simple docstring''' return LambdaLR(lowercase , lambda lowercase : 1 , last_epoch=lowercase ) def A ( lowercase , lowercase , lowercase = -1 ) -> List[str]: '''simple docstring''' def lr_lambda(lowercase ): if current_step < num_warmup_steps: return float(lowercase ) / float(max(1.0 , lowercase ) ) return 1.0 return LambdaLR(lowercase , lowercase , last_epoch=lowercase ) def A ( lowercase , lowercase , lowercase = -1 ) -> List[str]: '''simple docstring''' UpperCamelCase = {} UpperCamelCase = step_rules.split(',' ) for rule_str in rule_list[:-1]: UpperCamelCase , UpperCamelCase = rule_str.split(':' ) UpperCamelCase = int(lowercase ) UpperCamelCase = float(lowercase ) UpperCamelCase = value UpperCamelCase = float(rule_list[-1] ) def create_rules_function(lowercase , lowercase ): def rule_func(lowercase ) -> float: UpperCamelCase = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(lowercase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func UpperCamelCase = create_rules_function(lowercase , lowercase ) return LambdaLR(lowercase , lowercase , last_epoch=lowercase ) def A ( lowercase , lowercase , lowercase , lowercase=-1 ) -> Any: '''simple docstring''' def lr_lambda(lowercase ): if current_step < num_warmup_steps: return float(lowercase ) / float(max(1 , lowercase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(lowercase , lowercase , lowercase ) def A ( lowercase , lowercase , lowercase , lowercase = 0.5 , lowercase = -1 ) -> Optional[Any]: '''simple docstring''' def lr_lambda(lowercase ): if current_step < num_warmup_steps: return float(lowercase ) / float(max(1 , lowercase ) ) UpperCamelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowercase ) * 2.0 * progress )) ) return LambdaLR(lowercase , lowercase , lowercase ) def A ( lowercase , lowercase , lowercase , lowercase = 1 , lowercase = -1 ) -> List[Any]: '''simple docstring''' def lr_lambda(lowercase ): if current_step < num_warmup_steps: return float(lowercase ) / float(max(1 , lowercase ) ) UpperCamelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowercase ) * progress) % 1.0) )) ) return LambdaLR(lowercase , lowercase , lowercase ) def A ( lowercase , lowercase , lowercase , lowercase=1e-7 , lowercase=1.0 , lowercase=-1 ) -> Dict: '''simple docstring''' UpperCamelCase = optimizer.defaults['lr'] if not (lr_init > lr_end): raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' ) def lr_lambda(lowercase ): if current_step < num_warmup_steps: return float(lowercase ) / float(max(1 , 
lowercase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: UpperCamelCase = lr_init - lr_end UpperCamelCase = num_training_steps - num_warmup_steps UpperCamelCase = 1 - (current_step - num_warmup_steps) / decay_steps UpperCamelCase = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(lowercase , lowercase , lowercase ) _UpperCAmelCase : Optional[int] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def A ( lowercase , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = 1 , lowercase = 1.0 , lowercase = -1 , ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase = SchedulerType(lowercase ) UpperCamelCase = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(lowercase , last_epoch=lowercase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(lowercase , step_rules=lowercase , last_epoch=lowercase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(lowercase , num_warmup_steps=lowercase , last_epoch=lowercase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( lowercase , num_warmup_steps=lowercase , num_training_steps=lowercase , num_cycles=lowercase , last_epoch=lowercase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( lowercase , num_warmup_steps=lowercase , num_training_steps=lowercase , power=lowercase , last_epoch=lowercase , ) return schedule_func( lowercase , num_warmup_steps=lowercase , num_training_steps=lowercase , last_epoch=lowercase )
3
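Editor's note: every scheduler in the row above reduces to a multiplier function handed to torch's LambdaLR (the returned value scales the base learning rate each step). A torch-free sketch of the linear warmup-then-decay multiplier, with example step counts of my own:

def linear_warmup_decay(step: int, warmup: int = 10, total: int = 100) -> float:
    # Ramp from 0 to 1 over `warmup` steps, then decay linearly to 0 at `total`.
    if step < warmup:
        return step / max(1, warmup)
    return max(0.0, (total - step) / max(1, total - warmup))

assert linear_warmup_decay(5) == 0.5
assert linear_warmup_decay(100) == 0.0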
def A ( lowercase , lowercase ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b" UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b" UpperCamelCase = max(len(lowercase ) , len(lowercase ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
1
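Editor's note: a quick check of the zero-padded string-XOR idea in the row above against Python's built-in `^` operator (operand values are mine):

a, b = 0b1010, 0b110
bits_a, bits_b = bin(a)[2:], bin(b)[2:]
width = max(len(bits_a), len(bits_b))
xor = "0b" + "".join(str(int(c != d)) for c, d in zip(bits_a.zfill(width), bits_b.zfill(width)))
assert xor == bin(a ^ b)  # both yield '0b1100'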
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu _UpperCAmelCase : Tuple = [ "EAGER", "AOT_EAGER", "INDUCTOR", "NVFUSER", "AOT_NVFUSER", "AOT_CUDAGRAPHS", "OFI", "FX2TRT", "ONNXRT", "IPEX", ] def A ( lowercase , lowercase=None , lowercase=None , lowercase=None ) -> str: '''simple docstring''' UpperCamelCase = True while ask_again: UpperCamelCase = input(lowercase ) try: if default is not None and len(lowercase ) == 0: return default return convert_value(lowercase ) if convert_value is not None else result except Exception: if error_message is not None: print(lowercase ) def A ( lowercase , lowercase=[] , lowercase=None , lowercase=0 ) -> Any: '''simple docstring''' UpperCamelCase = BulletMenu(lowercase , lowercase ) UpperCamelCase = menu.run(default_choice=lowercase ) return convert_value(lowercase ) if convert_value is not None else result def A ( lowercase ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase = int(lowercase ) return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] ) def A ( lowercase ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase = int(lowercase ) return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] ) def A ( lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = int(lowercase ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def A ( lowercase ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase = int(lowercase ) return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] ) def A ( lowercase ) -> str: '''simple docstring''' UpperCamelCase = int(lowercase ) return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] ) def A ( lowercase ) -> Optional[Any]: '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class lowercase ( argparse.RawDescriptionHelpFormatter ): def __UpperCamelCase ( self , A_ , A_ , A_ , A_ ) -> str: """simple docstring""" UpperCamelCase = super()._format_usage(A_ , A_ , A_ , A_ ) UpperCamelCase = usage.replace('<command> [<args>] ' , '' ) return usage
3
import re def A ( lowercase ) -> str: '''simple docstring''' if len(re.findall('[ATCG]' , lowercase ) ) != len(lowercase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
1
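Editor's note: the row above validates a strand with a regex, then complements it through `str.translate`. A one-line usage sketch with a sample strand of my own:

complement_table = str.maketrans("ATCG", "TAGC")
assert "GATC".translate(complement_table) == "CTAG"  # each base maps to its Watson-Crick complement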
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( "stable diffusion controlnet", "0.22.0", "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.", standard_warn=False, stacklevel=3, )
3
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Dict = (DDPMScheduler,) def __UpperCamelCase ( self , **A_ ) -> Dict: """simple docstring""" UpperCamelCase = { 'num_train_timesteps': 1_000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**A_ ) return config def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=A_ ) def __UpperCamelCase ( self ) -> str: """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=A_ , beta_end=A_ ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=A_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=A_ ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual UpperCamelCase = model(A_ , A_ ) # 2. 
predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(A_ ) ) UpperCamelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' ) UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual UpperCamelCase = model(A_ , A_ ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(A_ ) ) UpperCamelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=A_ ) UpperCamelCase = scheduler.timesteps for i, timestep in enumerate(A_ ): if i == len(A_ ) - 1: UpperCamelCase = -1 else: UpperCamelCase = timesteps[i + 1] UpperCamelCase = scheduler.previous_timestep(A_ ) UpperCamelCase = prev_t.item() self.assertEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(A_ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 1, 0] UpperCamelCase = len(A_ ) with self.assertRaises(A_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ ) def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( A_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ): scheduler.set_timesteps(timesteps=A_ )
3
1
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device _UpperCAmelCase : Tuple = False class lowercase ( unittest.TestCase ): pass @nightly @require_torch_gpu class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> str: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' ) # remove text_unet pipe.remove_unused_weights() pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = 'A painting of a squirrel eating a burger ' UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = pipe( prompt=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(A_ ) UpperCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = generator.manual_seed(0 ) UpperCamelCase = pipe( prompt=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained( 'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = 'A painting of a squirrel eating a burger ' UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = pipe( prompt=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images UpperCamelCase = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
3
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: _UpperCAmelCase : List[str] = None _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _UpperCAmelCase : List[str] = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } _UpperCAmelCase : Optional[int] = { "camembert-base": 512, } _UpperCAmelCase : Union[str, Any] = "▁" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : str = VOCAB_FILES_NAMES __lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : List[str] = ["input_ids", "attention_mask"] __lowercase : Tuple = CamembertTokenizer def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , ) UpperCamelCase = vocab_file UpperCamelCase = False if not self.vocab_file else True def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
3
1
_UpperCAmelCase : List[str] = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } def A ( lowercase , lowercase , lowercase ) -> list[str]: '''simple docstring''' UpperCamelCase = set() # keep track of all the paths to be checked UpperCamelCase = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue UpperCamelCase = queue.pop(0 ) # get the last node from the path UpperCamelCase = path[-1] if node not in explored: UpperCamelCase = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: UpperCamelCase = list(lowercase ) new_path.append(lowercase ) queue.append(lowercase ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(lowercase ) # in case there's no path between the 2 nodes return [] def A ( lowercase , lowercase , lowercase ) -> int: '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 UpperCamelCase = [start] UpperCamelCase = set(lowercase ) # Keep tab on distances from `start` node. UpperCamelCase = {start: 0, target: -1} while queue: UpperCamelCase = queue.pop(0 ) if node == target: UpperCamelCase = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(lowercase ) queue.append(lowercase ) UpperCamelCase = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
3
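Editor's note: the BFS row above keeps whole paths in the queue, copying a list per neighbour. A standard leaner variant (my sketch, assuming the same adjacency-dict shape) stores parent pointers and rebuilds the path once at the goal:

from collections import deque

def bfs_path(graph: dict, start: str, goal: str) -> list:
    parents, queue = {start: None}, deque([start])
    while queue:
        node = queue.popleft()
        if node == goal:
            path = []
            while node is not None:  # walk parent pointers back to the start
                path.append(node)
                node = parents[node]
            return path[::-1]
        for neighbour in graph[node]:
            if neighbour not in parents:  # `parents` doubles as the visited set
                parents[neighbour] = node
                queue.append(neighbour)
    return []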
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCAmelCase : Union[str, Any] = { "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Dict = [ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys _UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
3
1
# Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _UpperCAmelCase : List[Any] = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$") @total_ordering @dataclass class lowercase : __lowercase : str __lowercase : Optional[str] = None __lowercase : Optional[Union[str, int]] = None __lowercase : Optional[Union[str, int]] = None __lowercase : Optional[Union[str, int]] = None def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase , UpperCamelCase = _str_to_version_tuple(self.version_str ) def __repr__( self ) -> Optional[Any]: """simple docstring""" return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}''' @property def __UpperCamelCase ( self ) -> str: """simple docstring""" return self.major, self.minor, self.patch def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if isinstance(A_ , A_ ): return Version(A_ ) elif isinstance(A_ , A_ ): return other raise TypeError(F'''{other} (type {type(A_ )}) cannot be compared to version.''' ) def __eq__( self , A_ ) -> int: """simple docstring""" try: UpperCamelCase = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self , A_ ) -> Any: """simple docstring""" UpperCamelCase = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self ) -> List[Any]: """simple docstring""" return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def __UpperCamelCase ( cls , A_ ) -> Optional[Any]: """simple docstring""" UpperCamelCase = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def __UpperCamelCase ( self ) -> str: """simple docstring""" return self.version_str def A ( lowercase ) -> str: '''simple docstring''' UpperCamelCase = _VERSION_REG.match(lowercase ) if not res: raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' ) return tuple(int(lowercase ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def A ( lowercase ) -> int: '''simple docstring''' return ".".join(str(lowercase ) for v in version_tuple )
3
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] = { "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json", } class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Dict = "data2vec-text" def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any: """simple docstring""" super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = hidden_act UpperCamelCase = intermediate_size UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = position_embedding_type UpperCamelCase = use_cache UpperCamelCase = classifier_dropout class lowercase ( _SCREAMING_SNAKE_CASE ): @property def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: UpperCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
3
1
def A ( lowercase , lowercase , lowercase ) -> float: '''simple docstring''' if principal <= 0: raise Exception('Principal borrowed must be > 0' ) if rate_per_annum < 0: raise Exception('Rate of interest must be >= 0' ) if years_to_repay <= 0 or not isinstance(lowercase , lowercase ): raise Exception('Years to repay must be an integer > 0' ) # Yearly rate is divided by 12 to get monthly rate UpperCamelCase = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly UpperCamelCase = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
3
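Editor's note: the row above is the standard EMI formula, EMI = P * r * (1 + r)**n / ((1 + r)**n - 1), with monthly rate r = annual rate / 12 and n = years * 12 payments. A worked numeric check (the loan figures are mine):

principal, rate_per_annum, years = 10_000, 0.12, 1
r, n = rate_per_annum / 12, years * 12
emi = principal * r * (1 + r) ** n / ((1 + r) ** n - 1)
assert round(emi, 2) == 888.49  # twelve payments of ~888.49 repay 10,000 at 12% p.a.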
from random import shuffle import tensorflow as tf from numpy import array def A ( lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = int(lowercase ) assert noofclusters < len(lowercase ) # Find out the dimensionality UpperCamelCase = len(vectors[0] ) # Will help select random centroids from among the available vectors UpperCamelCase = list(range(len(lowercase ) ) ) shuffle(lowercase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. UpperCamelCase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION UpperCamelCase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points UpperCamelCase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase ) ] ##These nodes will assign the centroid Variables the appropriate ##values UpperCamelCase = tf.placeholder('float64' , [dim] ) UpperCamelCase = [] for centroid in centroids: cent_assigns.append(tf.assign(lowercase , lowercase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) UpperCamelCase = [tf.Variable(0 ) for i in range(len(lowercase ) )] ##These nodes will assign an assignment Variable the appropriate ##value UpperCamelCase = tf.placeholder('int32' ) UpperCamelCase = [] for assignment in assignments: cluster_assigns.append(tf.assign(lowercase , lowercase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input UpperCamelCase = tf.placeholder('float' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors UpperCamelCase = tf.reduce_mean(lowercase , 0 ) ##Node for computing Euclidean distances # Placeholders for input UpperCamelCase = tf.placeholder('float' , [dim] ) UpperCamelCase = tf.placeholder('float' , [dim] ) UpperCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase , lowercase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input UpperCamelCase = tf.placeholder('float' , [noofclusters] ) UpperCamelCase = tf.argmin(lowercase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. UpperCamelCase = tf.initialize_all_variables() # Initialize all variables sess.run(lowercase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. UpperCamelCase = 100 for _ in range(lowercase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(lowercase ) ): UpperCamelCase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. 
UpperCamelCase = [ sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input UpperCamelCase = sess.run( lowercase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(lowercase ): # Collect all the vectors assigned to this cluster UpperCamelCase = [ vectors[i] for i in range(len(lowercase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location UpperCamelCase = sess.run( lowercase , feed_dict={mean_input: array(lowercase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments UpperCamelCase = sess.run(lowercase ) UpperCamelCase = sess.run(lowercase ) return centroids, assignments
3
1
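Editor's note: the row above is TF1 Session-era k-means; note `tf.sub` and `tf.initialize_all_variables`, which have long been removed from TensorFlow, so the record will not run on a modern install. The same expectation/maximization loop in plain NumPy, offered as a hedged sketch rather than a drop-in replacement:

import numpy as np

def kmeans(vectors: np.ndarray, k: int, iterations: int = 100, seed: int = 0):
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)].astype(float)
    for _ in range(iterations):
        # Expectation: assign each vector to its nearest centroid (Euclidean distance).
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # Maximization: move each non-empty cluster's centroid to its members' mean.
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments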
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging _UpperCAmelCase : List[Any] = logging.get_logger(__name__) def A ( lowercase=None , lowercase=None ) -> List[str]: '''simple docstring''' return field(default_factory=lambda: default , metadata=lowercase ) @dataclass class lowercase : __lowercase : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __lowercase : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __lowercase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __lowercase : bool = field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __lowercase : bool = field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __lowercase : bool = field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __lowercase : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Use FP16 to accelerate inference."} ) __lowercase : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Benchmark training of model"} ) __lowercase : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Verbose memory tracing"} ) __lowercase : bool = field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __lowercase : bool = field( default=_SCREAMING_SNAKE_CASE , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __lowercase : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Trace memory line by line"} ) __lowercase : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Save result to a CSV file"} ) __lowercase : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Save all print statements in a log file"} ) __lowercase : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to print environment information"} ) __lowercase : bool = field( default=_SCREAMING_SNAKE_CASE , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __lowercase : str = field( default=f"inference_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , ) __lowercase : str = field( default=f"inference_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __lowercase : str = field( default=f"train_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __lowercase : str = field( default=f"train_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __lowercase : str = field( default=f"env_info_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving environment information."} , ) __lowercase : str = field( default=f"log_{round(time() )}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , ) __lowercase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __lowercase : bool = field( default=_SCREAMING_SNAKE_CASE , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" warnings.warn( F'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils''' ' are deprecated in general and it is advised to use external Benchmarking libraries ' ' to benchmark Transformer models.' , A_ , ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( 'Please make sure you provide at least one model name / model identifier, *e.g.* `--models' ' bert-base-cased` or `args.models = [\'bert-base-cased\'].' ) return self.models @property def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info('Multiprocessing is currently not possible on TPU.' ) return False else: return True
3
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _UpperCAmelCase : Tuple = _symbol_database.Default() _UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile( b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) _UpperCAmelCase : int = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _UpperCAmelCase : int = None _UpperCAmelCase : List[str] = b"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _UpperCAmelCase : Optional[Any] = 45 _UpperCAmelCase : Any = 1_581 _UpperCAmelCase : Tuple = 1_517 _UpperCAmelCase : List[str] = 1_570 _UpperCAmelCase : int = 1_584 _UpperCAmelCase : List[Any] = 1_793 _UpperCAmelCase : Optional[int] = 1_795 _UpperCAmelCase : Any = 1_916 _UpperCAmelCase : Tuple = 1_864 _UpperCAmelCase : List[Any] = 1_905 _UpperCAmelCase : Union[str, Any] = 1_919 _UpperCAmelCase : str = 2_429 _UpperCAmelCase : Any = 2_208 _UpperCAmelCase : Dict = 2_418 _UpperCAmelCase : Optional[Any] = 2_323 _UpperCAmelCase : Tuple = 2_407 # @@protoc_insertion_point(module_scope)
3
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCAmelCase : List[Any] = { "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"], "tokenization_lxmert": ["LxmertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Tuple = ["LxmertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Dict = [ "LxmertEncoder", "LxmertForPreTraining", "LxmertForQuestionAnswering", "LxmertModel", "LxmertPreTrainedModel", "LxmertVisualFeatureEncoder", "LxmertXLayer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Dict = [ "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLxmertForPreTraining", "TFLxmertMainLayer", "TFLxmertModel", "TFLxmertPreTrainedModel", "TFLxmertVisualFeatureEncoder", ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys _UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
3
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" # A mock response for an HTTP head request to emulate server down UpperCamelCase = mock.Mock() UpperCamelCase = 500 UpperCamelCase = {} UpperCamelCase = HTTPError UpperCamelCase = {} # Download this model to make sure it's in the cache. UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head: UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def __UpperCamelCase ( self ) -> Dict: """simple docstring""" # A mock response for an HTTP head request to emulate server down UpperCamelCase = mock.Mock() UpperCamelCase = 500 UpperCamelCase = {} UpperCamelCase = HTTPError UpperCamelCase = {} # Download this model to make sure it's in the cache. UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head: UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' ) # This check we did call the fake head request mock_head.assert_called() def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" # This test is for deprecated behavior and can be removed in v5 try: UpperCamelCase = tempfile.mktemp() with open(A_ , 'wb' ) as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ ) UpperCamelCase = AlbertTokenizer.from_pretrained(A_ ) finally: os.remove(A_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('tokenizer.json' , 'wb' ) as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ ) UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('tokenizer.json' ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" # This test is for deprecated behavior and can be removed in v5 UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ) @is_staging_test class lowercase ( unittest.TestCase ): __lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def __UpperCamelCase ( cls ) -> Tuple: """simple docstring""" UpperCamelCase = TOKEN HfFolder.save_token(A_ ) @classmethod def __UpperCamelCase ( cls ) -> Optional[int]: """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-tokenizer' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' ) except HTTPError: pass def __UpperCamelCase ( self ) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizer(A_ ) tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='test-tokenizer' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def __UpperCamelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizer(A_ ) tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def __UpperCamelCase ( self ) -> Dict: """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = CustomTokenizer(A_ ) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module 
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizerFast.from_pretrained(A_ ) bert_tokenizer.save_pretrained(A_ ) UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ ) tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' ) UpperCamelCase = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = Trie() trie.add('Hello 友達' ) self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} ) trie.add('Hello' ) trie.data self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] ) trie.add('[CLS]' ) trie.add('extra_id_1' ) trie.add('extra_id_100' ) self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = Trie() trie.add('A' ) self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] ) self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = Trie() trie.add('TOKEN]' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = Trie() trie.add('A' ) trie.add('P' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = Trie() trie.add('AB' ) trie.add('B' ) trie.add('C' ) self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = Trie() trie.add('ABC' ) trie.add('B' ) trie.add('CD' ) self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] ) def __UpperCamelCase ( self ) -> int: """simple docstring""" # Even if the offsets are wrong, we necessarily output correct string # parts. UpperCamelCase = Trie() UpperCamelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] ) self.assertEqual(A_ , ['AB', 'C'] )
3
1
import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def A ( lowercase , lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def A ( lowercase , lowercase , lowercase , lowercase="attention" ) -> List[Any]: '''simple docstring''' UpperCamelCase = UpperCamelCase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) UpperCamelCase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) UpperCamelCase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) UpperCamelCase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def A ( lowercase , lowercase , lowercase , lowercase=False ) -> Tuple: '''simple docstring''' if split_mlp_wi: UpperCamelCase = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] UpperCamelCase = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] UpperCamelCase = (wi_a, wi_a) else: UpperCamelCase = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] UpperCamelCase = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def A ( lowercase , lowercase , lowercase , lowercase ) -> Any: '''simple docstring''' return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def A ( lowercase , *, lowercase , lowercase , lowercase = False ) -> Tuple: '''simple docstring''' UpperCamelCase = traverse_util.flatten_dict(variables['target'] ) UpperCamelCase = {'/'.join(lowercase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , lowercase ) UpperCamelCase = collections.OrderedDict() # Shared embeddings. UpperCamelCase = old['token_embedder/embedding'] # Encoder. for i in range(lowercase ): # Block i, layer 0 (Self Attention). UpperCamelCase = tax_layer_norm_lookup(lowercase , lowercase , 'encoder' , 'pre_attention_layer_norm' ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = tax_attention_lookup(lowercase , lowercase , 'encoder' , 'attention' ) UpperCamelCase = layer_norm UpperCamelCase = k.T UpperCamelCase = o.T UpperCamelCase = q.T UpperCamelCase = v.T # Block i, layer 1 (MLP). 
UpperCamelCase = tax_layer_norm_lookup(lowercase , lowercase , 'encoder' , 'pre_mlp_layer_norm' ) UpperCamelCase , UpperCamelCase = tax_mlp_lookup(lowercase , lowercase , 'encoder' , lowercase ) UpperCamelCase = layer_norm if split_mlp_wi: UpperCamelCase = wi[0].T UpperCamelCase = wi[1].T else: UpperCamelCase = wi.T UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCamelCase = tax_relpos_bias_lookup( lowercase , lowercase , 'encoder' ).T UpperCamelCase = old['encoder/encoder_norm/scale'] if not scalable_attention: UpperCamelCase = tax_relpos_bias_lookup( lowercase , 0 , 'encoder' ).T UpperCamelCase = tax_relpos_bias_lookup( lowercase , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(lowercase ): # Block i, layer 0 (Self Attention). UpperCamelCase = tax_layer_norm_lookup(lowercase , lowercase , 'decoder' , 'pre_self_attention_layer_norm' ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = tax_attention_lookup(lowercase , lowercase , 'decoder' , 'self_attention' ) UpperCamelCase = layer_norm UpperCamelCase = k.T UpperCamelCase = o.T UpperCamelCase = q.T UpperCamelCase = v.T # Block i, layer 1 (Cross Attention). UpperCamelCase = tax_layer_norm_lookup(lowercase , lowercase , 'decoder' , 'pre_cross_attention_layer_norm' ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = tax_attention_lookup(lowercase , lowercase , 'decoder' , 'encoder_decoder_attention' ) UpperCamelCase = layer_norm UpperCamelCase = k.T UpperCamelCase = o.T UpperCamelCase = q.T UpperCamelCase = v.T # Block i, layer 2 (MLP). UpperCamelCase = tax_layer_norm_lookup(lowercase , lowercase , 'decoder' , 'pre_mlp_layer_norm' ) UpperCamelCase , UpperCamelCase = tax_mlp_lookup(lowercase , lowercase , 'decoder' , lowercase ) UpperCamelCase = layer_norm if split_mlp_wi: UpperCamelCase = wi[0].T UpperCamelCase = wi[1].T else: UpperCamelCase = wi.T UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCamelCase = tax_relpos_bias_lookup(lowercase , lowercase , 'decoder' ).T UpperCamelCase = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: UpperCamelCase = old['decoder/logits_dense/kernel'].T return new def A ( lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: UpperCamelCase = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: UpperCamelCase = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) UpperCamelCase = state_dict['shared.weight'] return state_dict def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' UpperCamelCase = checkpoints.load_tax_checkpoint(lowercase ) UpperCamelCase = convert_tax_to_pytorch( lowercase , num_layers=config.num_layers , is_encoder_only=lowercase , scalable_attention=lowercase ) UpperCamelCase = make_state_dict(lowercase , lowercase ) model.load_state_dict(lowercase , strict=lowercase ) def A ( lowercase , lowercase , lowercase , lowercase = False , lowercase = False , ) -> List[Any]: '''simple docstring''' UpperCamelCase = MTaConfig.from_json_file(lowercase ) print(f'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: UpperCamelCase = UMTaEncoderModel(lowercase ) else: UpperCamelCase = UMTaForConditionalGeneration(lowercase ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase , lowercase , lowercase , lowercase , lowercase ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase ) print('Done' ) if __name__ == "__main__": _UpperCAmelCase : int = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False ) parser.add_argument( "--scalable_attention", action="store_true", help="Whether the model uses scaled attention (umt5 model)", default=False, ) _UpperCAmelCase : Any = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
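# Example invocation (sketch; the script filename is hypothetical, the flags are the ones defined above):
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file config.json \
#       --pytorch_dump_path ./pytorch_model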
3
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = features.copy() if features else default_expected_features UpperCamelCase = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def A ( lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if issubclass(lowercase , lowercase ): UpperCamelCase = parquet_path elif issubclass(lowercase , lowercase ): UpperCamelCase = [parquet_path] UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple: '''simple docstring''' assert isinstance(lowercase , lowercase ) for split in splits: UpperCamelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( lowercase , 
lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase = ParquetDatasetReader( {'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = features.copy() if features else default_expected_features UpperCamelCase = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if split: UpperCamelCase = {split: parquet_path} else: UpperCamelCase = 'train' UpperCamelCase = {'train': parquet_path, 'test': parquet_path} UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def A ( lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' ) assert writer.write() > 0 UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' ) UpperCamelCase = pf.read() assert dataset.data.table == output_table def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' ) UpperCamelCase = {'image': [image_path]} UpperCamelCase = Features({'image': Image()} ) UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase ) UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' ) assert writer.write() > 0 UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) ) assert dataset.features == reloaded_dataset.features UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( 'feature, expected' , [ (Features({'foo': Value('int32' )} ), None), (Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def A ( lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' assert get_writer_batch_size(lowercase ) == expected
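# Round-trip exercised by the tests above, as a sketch:
#   ParquetDatasetWriter(dataset, "out.parquet").write()   # returns a positive count on success
#   Dataset.from_parquet("out.parquet")                    # reloads with the same features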
3
1
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    # Two locks on the same path: the second acquire must time out while the first holds it.
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename(tmpdir):
    # Overlong lock filenames are shortened so the OS path-length limit is respected.
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
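# The behaviour exercised above, as a standalone sketch:
#
#   lock = FileLock("foo.lock")
#   with lock.acquire(timeout=0.01):
#       ...  # any other FileLock("foo.lock").acquire(...) now raises Timeout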
3
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Tuple: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize UpperCamelCase = size if size is not None else {'height': 18, 'width': 20} UpperCamelCase = do_thumbnail UpperCamelCase = do_align_axis UpperCamelCase = do_pad UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : Optional[int] = DonutImageProcessor if is_vision_available() else None def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = DonutImageProcessingTester(self ) @property def __UpperCamelCase ( self ) -> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , 'do_resize' ) ) self.assertTrue(hasattr(A_ , 'size' ) ) self.assertTrue(hasattr(A_ , 'do_thumbnail' ) ) self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) ) self.assertTrue(hasattr(A_ , 'do_pad' ) ) self.assertTrue(hasattr(A_ , 'do_normalize' ) ) self.assertTrue(hasattr(A_ , 'image_mean' ) ) self.assertTrue(hasattr(A_ , 'image_std' ) ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def __UpperCamelCase ( self ) -> int: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 
1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def __UpperCamelCase ( self ) -> Any: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
3
1
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence or word."""
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart; non-letters map to themselves.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
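# Illustrative examples for the function above:
#   capitalize("hello world") -> 'Hello world'
#   capitalize("123 hello")   -> '123 hello'   (a non-letter first character passes through)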
3
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Dict = logging.get_logger(__name__) _UpperCAmelCase : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } _UpperCAmelCase : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } _UpperCAmelCase : List[str] = {"facebook/blenderbot_small-90M": 512} def A ( lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char UpperCamelCase = set(lowercase ) return pairs class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Optional[Any] = VOCAB_FILES_NAMES __lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Any = ["input_ids", "attention_mask"] def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , ) -> List[Any]: """simple docstring""" super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ ) with open(A_ , encoding='utf-8' ) as vocab_handle: UpperCamelCase = json.load(A_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: UpperCamelCase = merges_handle.read().split('\n' )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in merges] UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) ) UpperCamelCase = {} @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return len(self.encoder ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = re.sub('([.,!?()])' , r' \1' , A_ ) UpperCamelCase = re.sub('(\')' , r' \1 ' , A_ ) UpperCamelCase = re.sub(r'\s{2,}' , ' ' , A_ ) if "\n" in token: UpperCamelCase = token.replace('\n' , ' __newln__' ) UpperCamelCase = token.split(' ' ) UpperCamelCase = [] for token in tokens: if not len(A_ ): continue UpperCamelCase = token.lower() UpperCamelCase = tuple(A_ ) UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) UpperCamelCase = get_pairs(A_ ) if not pairs: words.append(A_ ) continue while True: UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(A_ ): try: UpperCamelCase = word.index(A_ , A_ ) new_word.extend(word[i:j] ) UpperCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(A_ ) UpperCamelCase = new_word if len(A_ ) == 1: break else: UpperCamelCase = get_pairs(A_ ) UpperCamelCase = '@@ '.join(A_ ) UpperCamelCase = 
word[:-4] UpperCamelCase = word words.append(A_ ) return " ".join(A_ ) def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = re.findall(r'\S+\n?' , A_ ) for token in words: split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) ) return split_tokens def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" UpperCamelCase = token.lower() return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" return self.decoder.get(A_ , self.unk_token ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" UpperCamelCase = ' '.join(A_ ).replace('@@ ' , '' ).strip() return out_string def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' ) UpperCamelCase = 0 with open(A_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) UpperCamelCase = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file
3
1
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
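# Usage sketch (assuming this file is transformers/data/datasets/__init__.py):
#   from transformers.data.datasets import GlueDataset, GlueDataTrainingArguments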
3
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, computed recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and return it as a '0b'-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
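# Illustrative examples for the converters above:
#   binary_recursive(11) -> '1011'
#   main("-11")          -> '-0b1011'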
3
1
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
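# Shape sketch for the sliding windows built above:
#   x_train: (n_samples, look_back, 1)   -- look_back past steps per sample
#   y_train: (n_samples, forward_days)   -- forward_days future steps, flattened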
3
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Tuple = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k", "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v", "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q", "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u", "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v", "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out", "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos", "self_attn.rotary_emb": "encoder.embed_positions", "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm", "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1", "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2", "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv", "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm", "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm", "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense", "ffn1.w_2": "encoder.layers.*.ffn1.output_dense", "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm", "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense", "ffn2.w_2": "encoder.layers.*.ffn2.output_dense", "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _UpperCAmelCase : Any = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' for attribute in key.split('.' ): UpperCamelCase = getattr(lowercase , lowercase ) if weight_type is not None: UpperCamelCase = getattr(lowercase , lowercase ).shape else: UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCamelCase = value elif weight_type == "weight_g": UpperCamelCase = value elif weight_type == "weight_v": UpperCamelCase = value elif weight_type == "bias": UpperCamelCase = value elif weight_type == "running_mean": UpperCamelCase = value elif weight_type == "running_var": UpperCamelCase = value elif weight_type == "num_batches_tracked": UpperCamelCase = value elif weight_type == "inv_freq": UpperCamelCase = value else: UpperCamelCase = value logger.info(f'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def A ( lowercase , lowercase , lowercase ) -> Any: '''simple docstring''' UpperCamelCase = [] UpperCamelCase = fairseq_model.state_dict() UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: UpperCamelCase = True if "*" in mapped_key: UpperCamelCase = name.split(lowercase )[0].split('.' )[-2] UpperCamelCase = mapped_key.replace('*' , lowercase ) if "pos_bias_u" in name: UpperCamelCase = None elif "pos_bias_v" in name: UpperCamelCase = None elif "weight_g" in name: UpperCamelCase = 'weight_g' elif "weight_v" in name: UpperCamelCase = 'weight_v' elif "bias" in name: UpperCamelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase = 'weight' elif "running_mean" in name: UpperCamelCase = 'running_mean' elif "inv_freq" in name: UpperCamelCase = 'inv_freq' elif "running_var" in name: UpperCamelCase = 'running_var' elif "num_batches_tracked" in name: UpperCamelCase = 'num_batches_tracked' else: UpperCamelCase = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = full_name.split('conv_layers.' )[-1] UpperCamelCase = name.split('.' 
) UpperCamelCase = int(items[0] ) UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase ) @torch.no_grad() def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int: '''simple docstring''' if config_path is not None: UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' ) else: UpperCamelCase = WavaVecaConformerConfig() if "rope" in checkpoint_path: UpperCamelCase = 'rotary' if is_finetuned: if dict_path: UpperCamelCase = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase = target_dict.pad_index UpperCamelCase = target_dict.bos_index UpperCamelCase = target_dict.eos_index UpperCamelCase = len(target_dict.symbols ) UpperCamelCase = os.path.join(lowercase , 'vocab.json' ) if not os.path.isdir(lowercase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase = 0 UpperCamelCase = 1 with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(lowercase , lowercase ) UpperCamelCase = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , ) UpperCamelCase = True if config.feat_extract_norm == 'layer' else False UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) UpperCamelCase = WavaVecaConformerForCTC(lowercase ) else: UpperCamelCase = 
WavaVecaConformerForPreTraining(lowercase ) if is_finetuned: UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: UpperCamelCase = argparse.Namespace(task='audio_pretraining' ) UpperCamelCase = fairseq.tasks.setup_task(lowercase ) UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase ) UpperCamelCase = model[0].eval() recursively_load_weights(lowercase , lowercase , not is_finetuned ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCAmelCase : Dict = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
3
1
import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : Union[str, Any] = VideoToVideoSDPipeline __lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} __lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} __lowercase : str = PipelineTesterMixin.required_optional_params - {"latents"} __lowercase : int = False # No `output_type`. __lowercase : int = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , ) UpperCamelCase = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=A_ , set_alpha_to_one=A_ , ) torch.manual_seed(0 ) UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , ) UpperCamelCase = CLIPTextModel(A_ ) UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) UpperCamelCase = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def __UpperCamelCase ( self , A_ , A_=0 ) -> Any: """simple docstring""" # 3 frames UpperCamelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ ) if str(A_ ).startswith('mps' ): UpperCamelCase = torch.manual_seed(A_ ) else: UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'video': video, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase = self.get_dummy_components() UpperCamelCase = VideoToVideoSDPipeline(**A_ ) UpperCamelCase = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = self.get_dummy_inputs(A_ ) UpperCamelCase = 'np' UpperCamelCase = 
sd_pipe(**A_ ).frames UpperCamelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCamelCase ( self ) -> int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ , expected_max_diff=5e-3 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def __UpperCamelCase ( self ) -> str: """simple docstring""" pass def __UpperCamelCase ( self ) -> int: """simple docstring""" return super().test_progress_bar() @slow @skip_mps class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase = torch.randn((1, 10, 3, 1_024, 576) , generator=A_ ) UpperCamelCase = video.to('cuda' ) UpperCamelCase = 'Spiderman is surfing' UpperCamelCase = pipe(A_ , video=A_ , generator=A_ , num_inference_steps=3 , output_type='pt' ).frames UpperCamelCase = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
3
from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets _UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n" _UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n" _UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def A ( lowercase , lowercase ) -> List[str]: '''simple docstring''' return float((preds == labels).mean() ) def A ( lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = simple_accuracy(lowercase , lowercase ) UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) ) return { "accuracy": acc, "f1": fa, } def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] ) UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' 
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , ) def __UpperCamelCase ( self , A_ , A_ ) -> Any: """simple docstring""" if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(A_ , A_ )} elif self.config_name == "stsb": return pearson_and_spearman(A_ , A_ ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(A_ , A_ ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(A_ , A_ )} else: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
3
1
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _UpperCAmelCase : Tuple = _symbol_database.Default() _UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile( b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) _UpperCAmelCase : int = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _UpperCAmelCase : int = None _UpperCAmelCase : List[str] = b"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _UpperCAmelCase : Optional[Any] = 45 _UpperCAmelCase : Any = 1_581 _UpperCAmelCase : Tuple = 1_517 _UpperCAmelCase : List[str] = 1_570 _UpperCAmelCase : int = 1_584 _UpperCAmelCase : List[Any] = 1_793 _UpperCAmelCase : Optional[int] = 1_795 _UpperCAmelCase : Any = 1_916 _UpperCAmelCase : Tuple = 1_864 _UpperCAmelCase : List[Any] = 1_905 _UpperCAmelCase : Union[str, Any] = 1_919 _UpperCAmelCase : str = 2_429 _UpperCAmelCase : Any = 2_208 _UpperCAmelCase : Dict = 2_418 _UpperCAmelCase : Optional[Any] = 2_323 _UpperCAmelCase : Tuple = 2_407 # @@protoc_insertion_point(module_scope)
3
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _UpperCAmelCase : str = "scheduler_config.json" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Tuple = 1 __lowercase : int = 2 __lowercase : List[Any] = 3 __lowercase : str = 4 __lowercase : Optional[Any] = 5 @dataclass class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : jnp.ndarray class lowercase : __lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME __lowercase : Dict = ["dtype"] __lowercase : List[Any] = [] __lowercase : Dict = True @classmethod def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = cls.load_config( pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , ) UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ ) if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ): UpperCamelCase = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str: """simple docstring""" self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ ) @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return self._get_compatibles() @classmethod def __UpperCamelCase ( cls ) -> int: """simple docstring""" UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) ) UpperCamelCase = importlib.import_module(__name__.split('.' )[0] ) UpperCamelCase = [ getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ ) ] return compatible_classes def A ( lowercase , lowercase ) -> jnp.ndarray: '''simple docstring''' assert len(lowercase ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase ) def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray: '''simple docstring''' def alpha_bar(lowercase ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 UpperCamelCase = [] for i in range(lowercase ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) ) return jnp.array(lowercase , dtype=lowercase ) @flax.struct.dataclass class lowercase : __lowercase : jnp.ndarray __lowercase : jnp.ndarray __lowercase : jnp.ndarray @classmethod def __UpperCamelCase ( cls , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = scheduler.config if config.trained_betas is not None: UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
UpperCamelCase = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) UpperCamelCase = 1.0 - betas UpperCamelCase = jnp.cumprod(A_ , axis=0 ) return cls( alphas=A_ , betas=A_ , alphas_cumprod=A_ , ) def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = state.alphas_cumprod UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def A ( lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
3
1
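A minimal standalone sketch of the "squaredcos_cap_v2" beta schedule used in the scheduler snippet above, assuming the usual Glide-style alpha_bar definition; plain numpy stands in for jax.numpy so the piece runs on its own.

import math

import numpy as np


def cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> np.ndarray:
    # alpha_bar(t) follows the cosine form from the snippet above
    def alpha_bar(time_step: float) -> float:
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # clip each beta at max_beta to avoid a singularity near t = 1
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas, dtype=np.float64)


print(cosine_betas(10))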
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _UpperCAmelCase : str = logging.get_logger(__name__) # General docstring _UpperCAmelCase : Any = "RegNetConfig" # Base docstring _UpperCAmelCase : str = "facebook/regnet-y-040" _UpperCAmelCase : Optional[int] = [1, 1_088, 7, 7] # Image classification docstring _UpperCAmelCase : Optional[int] = "facebook/regnet-y-040" _UpperCAmelCase : str = "tabby, tabby cat" _UpperCAmelCase : List[Any] = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowercase ( tf.keras.layers.Layer ): def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , ) -> Optional[Any]: """simple docstring""" super().__init__(**A_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) UpperCamelCase = tf.keras.layers.ConvaD( filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , ) UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' ) UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" UpperCamelCase = self.convolution(self.padding(A_ ) ) UpperCamelCase = self.normalization(A_ ) UpperCamelCase = self.activation(A_ ) return hidden_state class lowercase ( tf.keras.layers.Layer ): def __init__( self , A_ , **A_ ) -> Tuple: """simple docstring""" super().__init__(**A_ ) UpperCamelCase = config.num_channels UpperCamelCase = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" UpperCamelCase = shape_list(A_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) UpperCamelCase = tf.transpose(A_ , perm=(0, 2, 3, 1) ) UpperCamelCase = self.embedder(A_ ) return hidden_state class lowercase ( tf.keras.layers.Layer ): def __init__( self , A_ , A_ = 2 , **A_ ) -> Dict: """simple docstring""" super().__init__(**A_ ) UpperCamelCase = tf.keras.layers.ConvaD( filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' ) UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' ) def __UpperCamelCase ( self , A_ , A_ = False ) -> tf.Tensor: """simple docstring""" return self.normalization(self.convolution(A_ ) , training=A_ ) class lowercase ( tf.keras.layers.Layer ): def __init__( self , A_ , A_ , **A_ ) -> Any: """simple docstring""" super().__init__(**A_ ) UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' ) UpperCamelCase = [ tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] UpperCamelCase = self.pooler(A_ ) for layer_module in self.attention: UpperCamelCase = layer_module(A_ ) UpperCamelCase = hidden_state * pooled return hidden_state class lowercase ( tf.keras.layers.Layer ): def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ ) -> Dict: """simple docstring""" super().__init__(**A_ ) UpperCamelCase = in_channels != out_channels or stride != 1 UpperCamelCase = max(1 , out_channels // config.groups_width ) UpperCamelCase = ( TFRegNetShortCut(A_ , stride=A_ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
UpperCamelCase = [ TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ), ] UpperCamelCase = ACTaFN[config.hidden_act] def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" UpperCamelCase = hidden_state for layer_module in self.layers: UpperCamelCase = layer_module(A_ ) UpperCamelCase = self.shortcut(A_ ) hidden_state += residual UpperCamelCase = self.activation(A_ ) return hidden_state class lowercase ( tf.keras.layers.Layer ): def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ ) -> Optional[int]: """simple docstring""" super().__init__(**A_ ) UpperCamelCase = in_channels != out_channels or stride != 1 UpperCamelCase = max(1 , out_channels // config.groups_width ) UpperCamelCase = ( TFRegNetShortCut(A_ , stride=A_ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) UpperCamelCase = [ TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ), ] UpperCamelCase = ACTaFN[config.hidden_act] def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" UpperCamelCase = hidden_state for layer_module in self.layers: UpperCamelCase = layer_module(A_ ) UpperCamelCase = self.shortcut(A_ ) hidden_state += residual UpperCamelCase = self.activation(A_ ) return hidden_state class lowercase ( tf.keras.layers.Layer ): def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ ) -> Any: """simple docstring""" super().__init__(**A_ ) UpperCamelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer UpperCamelCase = [ # downsampling is done in the first layer with stride of 2 layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ), *[layer(A_ , A_ , A_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" for layer_module in self.layers: UpperCamelCase = layer_module(A_ ) return hidden_state class lowercase ( tf.keras.layers.Layer ): def __init__( self , A_ , **A_ ) -> Dict: """simple docstring""" super().__init__(**A_ ) UpperCamelCase = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F'''stages.{i+1}''' ) ) def __UpperCamelCase ( self , A_ , A_ = False , A_ = True ) -> TFBaseModelOutputWithNoAttention: """simple docstring""" UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: UpperCamelCase = hidden_states + (hidden_state,) UpperCamelCase = stage_module(A_ ) if output_hidden_states: UpperCamelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return 
TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ ) @keras_serializable class lowercase ( tf.keras.layers.Layer ): __lowercase : Union[str, Any] = RegNetConfig def __init__( self , A_ , **A_ ) -> Tuple: """simple docstring""" super().__init__(**A_ ) UpperCamelCase = config UpperCamelCase = TFRegNetEmbeddings(A_ , name='embedder' ) UpperCamelCase = TFRegNetEncoder(A_ , name='encoder' ) UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' ) @unpack_inputs def __UpperCamelCase ( self , A_ , A_ = None , A_ = None , A_ = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: """simple docstring""" UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase = self.embedder(A_ , training=A_ ) UpperCamelCase = self.encoder( A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ ) UpperCamelCase = encoder_outputs[0] UpperCamelCase = self.pooler(A_ ) # Change to NCHW output format have uniformity in the modules UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) ) UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: UpperCamelCase = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : str = RegNetConfig __lowercase : List[Any] = "regnet" __lowercase : List[Any] = "pixel_values" @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} _UpperCAmelCase : str = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" _UpperCAmelCase : Dict = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, _SCREAMING_SNAKE_CASE , ) class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ , *A_ , **A_ ) -> Any: """simple docstring""" super().__init__(A_ , *A_ , **A_ ) UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(A_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCamelCase ( self , A_ , A_ = None , A_ = None , A_=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: """simple docstring""" UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase = self.regnet( pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _SCREAMING_SNAKE_CASE , ) class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): def __init__( self , A_ , *A_ , **A_ ) -> Optional[Any]: """simple docstring""" super().__init__(A_ , *A_ , **A_ ) UpperCamelCase = config.num_labels UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' ) # classification head UpperCamelCase = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(A_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCamelCase ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: """simple docstring""" UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase = self.regnet( A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ ) UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] UpperCamelCase = self.classifier[0](A_ ) UpperCamelCase = self.classifier[1](A_ ) UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ ) if not return_dict: UpperCamelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
3
from abc import ABC, abstractmethod from typing import List, Optional class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self ) -> Optional[Any]: """simple docstring""" # test for the above condition self.test() def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = 0 UpperCamelCase = False while not completed: if counter == 1: self.reset() UpperCamelCase = self.advance() if not self.does_advance(A_ ): raise Exception( 'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' ) UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ ) counter += 1 if counter > 10_000: raise Exception('update() does not fulfill the constraint.' ) if self.remaining() != 0: raise Exception('Custom Constraint is not defined correctly.' ) @abstractmethod def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> Any: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> Any: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCamelCase = token_ids UpperCamelCase = len(self.token_ids ) UpperCamelCase = -1 # the index of the currently fulfilled step UpperCamelCase = False def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.fulfilled_idx += 1 UpperCamelCase = True if self.fulfilled_idx == (self.seqlen - 1): UpperCamelCase = True UpperCamelCase = completed else: # failed to make progress. 
UpperCamelCase = True self.reset() return stepped, completed, reset def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = False UpperCamelCase = 0 def __UpperCamelCase ( self ) -> int: """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = PhrasalConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.fulfilled_idx UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ , A_=True ) -> List[Any]: """simple docstring""" UpperCamelCase = max([len(A_ ) for one in nested_token_ids] ) UpperCamelCase = {} for token_ids in nested_token_ids: UpperCamelCase = root for tidx, token_id in enumerate(A_ ): if token_id not in level: UpperCamelCase = {} UpperCamelCase = level[token_id] if no_subsets and self.has_subsets(A_ , A_ ): raise ValueError( 'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is' F''' {nested_token_ids}.''' ) UpperCamelCase = root def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.trie for current_token in current_seq: UpperCamelCase = start[current_token] UpperCamelCase = list(start.keys() ) return next_tokens def __UpperCamelCase ( self , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.next_tokens(A_ ) return len(A_ ) == 0 def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = list(root.values() ) if len(A_ ) == 0: return 1 else: return sum([self.count_leaves(A_ ) for nn in next_nodes] ) def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.count_leaves(A_ ) return len(A_ ) != leaf_count class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> str: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCamelCase = DisjunctiveTrie(A_ ) UpperCamelCase = nested_token_ids UpperCamelCase = self.trie.max_height UpperCamelCase = [] UpperCamelCase = False def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.trie.next_tokens(self.current_seq ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.current_seq.append(A_ ) UpperCamelCase = True else: UpperCamelCase = True self.reset() 
UpperCamelCase = self.trie.reached_leaf(self.current_seq ) UpperCamelCase = completed return stepped, completed, reset def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = False UpperCamelCase = [] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" UpperCamelCase = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.current_seq UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ ) -> Tuple: """simple docstring""" UpperCamelCase = constraints # max # of steps required to fulfill a given constraint UpperCamelCase = max([c.seqlen for c in constraints] ) UpperCamelCase = len(A_ ) UpperCamelCase = False self.init_state() def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = None UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCamelCase = constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) else: UpperCamelCase = self.inprogress_constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Any: """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCamelCase , UpperCamelCase = self.add(A_ ) # the entire list of constraints are fulfilled if self.completed: break def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCamelCase , UpperCamelCase = False, False if self.completed: UpperCamelCase = True UpperCamelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) ) UpperCamelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. 
self.complete_constraints.append(self.inprogress_constraint ) UpperCamelCase = None if len(self.pending_constraints ) == 0: # we're done! UpperCamelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(A_ ): UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ ) if not stepped: raise Exception( '`constraint.update(token_id)` is not yielding incremental progress, ' 'even though `constraint.does_advance(token_id)` is true.' ) if complete: self.complete_constraints.append(A_ ) UpperCamelCase = None if not complete and stepped: UpperCamelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCamelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCamelCase = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __UpperCamelCase ( self , A_=True ) -> Tuple: """simple docstring""" UpperCamelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCamelCase = [ constraint.copy(stateful=A_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ ) UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
3
1
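A condensed, self-contained sketch of the stepped/completed/reset bookkeeping that the PhrasalConstraint update loop performs in the snippet above; this illustrates the idea only and is not the transformers implementation itself.

def fulfil_phrase(token_ids: list[int], stream: list[int]) -> bool:
    fulfilled = 0  # mirrors fulfilled_idx + 1
    for token in stream:
        if token == token_ids[fulfilled]:
            fulfilled += 1  # "stepped"
            if fulfilled == len(token_ids):
                return True  # "completed"
        else:
            fulfilled = 0  # "reset": partial progress is discarded, as in update()
    return False


print(fulfil_phrase([5, 9, 2], [1, 5, 9, 2, 7]))  # True
print(fulfil_phrase([5, 9, 2], [5, 9, 7]))        # False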
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : str = logging.get_logger(__name__) class lowercase : def __init__( self , A_ = None , A_ = None , A_=None , A_=None ) -> str: """simple docstring""" if not conversation_id: UpperCamelCase = uuid.uuida() if past_user_inputs is None: UpperCamelCase = [] if generated_responses is None: UpperCamelCase = [] UpperCamelCase = conversation_id UpperCamelCase = past_user_inputs UpperCamelCase = generated_responses UpperCamelCase = text def __eq__( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def __UpperCamelCase ( self , A_ , A_ = False ) -> Dict: """simple docstring""" if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) UpperCamelCase = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' ) else: UpperCamelCase = text def __UpperCamelCase ( self ) -> Any: """simple docstring""" if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) UpperCamelCase = None def __UpperCamelCase ( self , A_ ) -> Any: """simple docstring""" self.generated_responses.append(A_ ) def __UpperCamelCase ( self ) -> int: """simple docstring""" for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ) -> str: """simple docstring""" UpperCamelCase = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): UpperCamelCase = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( _SCREAMING_SNAKE_CASE , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , ) class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , *A_ , **A_ ) -> List[str]: """simple docstring""" super().__init__(*A_ , **A_ ) if self.tokenizer.pad_token_id is None: UpperCamelCase = self.tokenizer.eos_token def __UpperCamelCase ( self , A_=None , A_=None , A_=None , **A_ ) -> List[str]: """simple docstring""" UpperCamelCase = {} UpperCamelCase = {} UpperCamelCase = {} if min_length_for_response is not None: UpperCamelCase = min_length_for_response if minimum_tokens is not None: UpperCamelCase = minimum_tokens if "max_length" in generate_kwargs: UpperCamelCase = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: UpperCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(A_ ) return preprocess_params, forward_params, postprocess_params def __call__( self , A_ , A_=0 , 
**A_ ) -> Dict: """simple docstring""" UpperCamelCase = super().__call__(A_ , num_workers=A_ , **A_ ) if isinstance(A_ , A_ ) and len(A_ ) == 1: return outputs[0] return outputs def __UpperCamelCase ( self , A_ , A_=32 ) -> Dict[str, Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): UpperCamelCase = self.tokenizer._build_conversation_input_ids(A_ ) else: # If the tokenizer cannot handle conversations, we default to only the old version UpperCamelCase = self._legacy_parse_and_tokenize(A_ ) if self.framework == "pt": UpperCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": UpperCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def __UpperCamelCase ( self , A_ , A_=10 , **A_ ) -> Optional[Any]: """simple docstring""" UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length ) UpperCamelCase = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) UpperCamelCase = max_length - minimum_tokens UpperCamelCase = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: UpperCamelCase = model_inputs['attention_mask'][:, -trim:] UpperCamelCase = model_inputs.pop('conversation' ) UpperCamelCase = max_length UpperCamelCase = self.model.generate(**A_ , **A_ ) if self.model.config.is_encoder_decoder: UpperCamelCase = 1 else: UpperCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def __UpperCamelCase ( self , A_ , A_=True ) -> List[str]: """simple docstring""" UpperCamelCase = model_outputs['output_ids'] UpperCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , ) UpperCamelCase = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(A_ ) return conversation def __UpperCamelCase ( self , A_ ) -> Dict: """simple docstring""" UpperCamelCase = self.tokenizer.eos_token_id UpperCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) ) if len(A_ ) > self.tokenizer.model_max_length: UpperCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
3
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): @register_to_config def __init__( self , A_ , A_ = None , A_ = None ) -> Any: """simple docstring""" super().__init__() UpperCamelCase = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCamelCase = torch.zeros(A_ , A_ ) else: UpperCamelCase = None UpperCamelCase = torch.nn.Parameter(A_ ) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : VQModel __lowercase : CLIPTextModel __lowercase : CLIPTokenizer __lowercase : TransformeraDModel __lowercase : LearnedClassifierFreeSamplingEmbeddings __lowercase : VQDiffusionScheduler def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]: """simple docstring""" super().__init__() self.register_modules( vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1 # get prompt text embeddings UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) UpperCamelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length] UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate text embeddings for each generation per prompt UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 ) else: UpperCamelCase = [''] * batch_size UpperCamelCase = text_input_ids.shape[-1] UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , ) UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase = negative_prompt_embeds.shape[1] UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 ) UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" if isinstance(A_ , A_ ): UpperCamelCase = 1 elif isinstance(A_ , A_ ): UpperCamelCase = len(A_ ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' ) UpperCamelCase = batch_size * num_images_per_prompt UpperCamelCase = guidance_scale > 1.0 UpperCamelCase = self._encode_prompt(A_ , A_ , A_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(A_ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCamelCase = (batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCamelCase = self.transformer.num_vector_embeds - 1 UpperCamelCase = torch.full(A_ , A_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( 'Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,' F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCamelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(A_ , device=self.device ) UpperCamelCase = self.scheduler.timesteps.to(self.device ) UpperCamelCase = latents for i, t in enumerate(self.progress_bar(A_ ) ): # expand the sample if we are doing classifier free guidance UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample if do_classifier_free_guidance: UpperCamelCase , UpperCamelCase = model_output.chunk(2 ) UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ ) UpperCamelCase = self.truncate(A_ , A_ ) # remove `log(0)`'s (`-inf`s) UpperCamelCase = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A_ , A_ , A_ ) UpperCamelCase = self.vqvae.config.vq_embed_dim UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ ) UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ ) def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor: """simple docstring""" UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ ) UpperCamelCase = torch.exp(A_ ) UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ ) UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 ) UpperCamelCase = keep_mask[:, :-1, :] UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) ) UpperCamelCase = log_p_x_0.clone() UpperCamelCase = -torch.inf # -inf = log(0) return rv
3
1
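A hedged sketch of the truncate step at the end of the VQ-Diffusion snippet above: classes whose sorted cumulative probability already exceeds the truncation rate are sent to log(0), while the single most likely class is always kept.

import torch


def truncate_log_probs(log_p: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    sorted_log_p, indices = torch.sort(log_p, dim=1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # shift the mask right so the largest-probability entry is always kept
    keep = torch.cat([torch.ones_like(keep[:, :1]), keep[:, :-1]], dim=1)
    keep = keep.gather(1, indices.argsort(1))  # undo the sort
    out = log_p.clone()
    out[~keep] = float("-inf")  # -inf = log(0)
    return out


log_p = torch.log(torch.tensor([[0.50, 0.30, 0.15, 0.05]]))
print(truncate_log_probs(log_p, 0.8))  # only the 0.50 and 0.30 classes survive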
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
3
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
3
1
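A quick self-contained check of the escape-time iteration from the Mandelbrot snippet, using Python's complex type: z*z + c is the same update as the a/b arithmetic above, and |z| > 2 is the same divergence test as a*a + b*b > 4.

def escape_fraction(c: complex, max_step: int = 50) -> float:
    z = c  # the snippet seeds a = x, b = y, i.e. z0 = c
    for step in range(max_step):  # noqa: B007
        z = z * z + c
        if abs(z) > 2:  # guaranteed divergence
            break
    return step / (max_step - 1)


print(escape_fraction(0j))      # 1.0 -> the origin is inside the set
print(escape_fraction(1 + 1j))  # 0.0 -> diverges immediately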
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
3
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):
        # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
3
1
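A short usage check for the bisection routine above (as fixed up): sqrt(2) is the unique root of x**2 - 2 on [0, 2], and the endpoint signs differ, so the interval is valid.

root = bisection(lambda x: x**2 - 2, 0, 2)  # assumes the `bisection` defined above
print(root)                     # ~1.4142135
print(abs(root**2 - 2) < 1e-6)  # True, consistent with the 1e-7 stopping width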
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
3
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
3
1
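A compact self-contained round-trip check for the Roman-numeral helpers above, using the same subtractive-notation rule condensed into one pass.

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman(numerals: str) -> int:
    total = 0
    # pad with "I" so the last symbol is always compared against a value <= itself
    for cur, nxt in zip(numerals, numerals[1:] + "I"):
        total += -SYMBOLS[cur] if SYMBOLS[cur] < SYMBOLS[nxt] else SYMBOLS[cur]
    return total


print(parse_roman("MCMXC"))  # 1990
print(parse_roman("MMXIV"))  # 2014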
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : Optional[int] = IFImgaImgSuperResolutionPipeline __lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"} __lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} ) __lowercase : Any = PipelineTesterMixin.required_optional_params - {"latents"} def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" return self._get_superresolution_dummy_components() def __UpperCamelCase ( self , A_ , A_=0 ) -> Dict: """simple docstring""" if str(A_ ).startswith('mps' ): UpperCamelCase = torch.manual_seed(A_ ) else: UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" self._test_save_load_local() def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
3
import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase ) UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: UpperCamelCase = dataset_size < in_memory_max_size else: UpperCamelCase = False UpperCamelCase = is_small_dataset(lowercase ) assert result == expected
3
1
from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging _UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Tuple = ["input_features", "attention_mask"] def __init__( self , A_=80 , A_=16_000 , A_=0.0 , A_=10 , A_=25 , A_="hamming_window" , A_=3_2768.0 , A_=0.97 , A_=1.0 , A_=True , A_=True , A_=False , **A_ , ) -> Optional[int]: """simple docstring""" super().__init__(feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ ) UpperCamelCase = feature_size UpperCamelCase = sampling_rate UpperCamelCase = padding_value UpperCamelCase = hop_length UpperCamelCase = win_length UpperCamelCase = frame_signal_scale UpperCamelCase = preemphasis_coeff UpperCamelCase = mel_floor UpperCamelCase = normalize_means UpperCamelCase = normalize_vars UpperCamelCase = win_function UpperCamelCase = return_attention_mask UpperCamelCase = win_length * sampling_rate // 1_000 UpperCamelCase = hop_length * sampling_rate // 1_000 UpperCamelCase = optimal_fft_length(self.sample_size ) UpperCamelCase = (self.n_fft // 2) + 1 def __UpperCamelCase ( self , A_ ) -> np.ndarray: """simple docstring""" if self.win_function == "hamming_window": UpperCamelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=A_ ) else: UpperCamelCase = window_function(window_length=self.sample_size , name=self.win_function ) UpperCamelCase = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) UpperCamelCase = spectrogram( one_waveform * self.frame_signal_scale , window=A_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=A_ , preemphasis=self.preemphasis_coeff , mel_filters=A_ , mel_floor=self.mel_floor , log_mel='log' , ) return msfc_features.T def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]: """simple docstring""" # make sure we normalize float32 arrays if self.normalize_means: UpperCamelCase = x[:input_length].mean(axis=0 ) UpperCamelCase = np.subtract(A_ , A_ ) if self.normalize_vars: UpperCamelCase = x[:input_length].std(axis=0 ) UpperCamelCase = np.divide(A_ , A_ ) if input_length < x.shape[0]: UpperCamelCase = padding_value # make sure array is in float32 UpperCamelCase = x.astype(np.floataa ) return x def __UpperCamelCase ( self , A_ , A_ = None ) -> List[np.ndarray]: """simple docstring""" UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(A_ , A_ , self.padding_value ) for x, n in zip(A_ , A_ )] def __call__( self , A_ , A_ = False , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , A_ = None , **A_ , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) UpperCamelCase = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) UpperCamelCase = is_batched_numpy or ( isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase = [np.asarray(A_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A_ , np.ndarray ): UpperCamelCase = np.asarray(A_ , dtype=np.floataa ) elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase = [raw_speech] # extract fbank features UpperCamelCase = [self._extract_mfsc_features(A_ ) for one_waveform in raw_speech] # convert into correct format for padding UpperCamelCase = BatchFeature({'input_features': features} ) UpperCamelCase = self.pad( A_ , padding=A_ , max_length=A_ , truncation=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , **A_ , ) # make sure list is in array format UpperCamelCase = padded_inputs.get('input_features' ) if isinstance(input_features[0] , A_ ): UpperCamelCase = [np.asarray(A_ , dtype=np.floataa ) for feature in input_features] UpperCamelCase = padded_inputs.get('attention_mask' ) if attention_mask is not None: UpperCamelCase = [np.asarray(A_ , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: UpperCamelCase = ( np.array(A_ , dtype=np.intaa ) if self._get_padding_strategies(A_ , max_length=A_ ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) UpperCamelCase = self.normalize( padded_inputs['input_features'] , attention_mask=A_ ) if return_tensors is not None: UpperCamelCase = padded_inputs.convert_to_tensors(A_ ) return padded_inputs
3
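A hedged usage sketch for the MFSC feature-extractor row above (the class is named `lowercase` in this dump). The keyword names feature_size, sampling_rate, padding, return_attention_mask, and return_tensors are pre-obfuscation assumptions, and the silent one-second clip is illustrative:

import numpy as np

fe = lowercase(feature_size=80, sampling_rate=16_000)  # assumed keyword names
clip = np.zeros(16_000, dtype=np.float32)              # 1 s of silence at 16 kHz
out = fe(clip, sampling_rate=16_000, padding=True, return_attention_mask=True, return_tensors="np")
# __call__ pads the batch, optionally normalizes, and returns the two
# declared model inputs: "input_features" and "attention_mask".
print(out["input_features"].shape)  # (batch, frames, 80 mel-frequency bins)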
def A ( lowercase , lowercase ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b" UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b" UpperCamelCase = max(len(lowercase ) , len(lowercase ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
1
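A worked example for the binary-XOR row above, whose doctest body was reduced to 'simple docstring' in this dump. binary_xor is an assumed, descriptive name for that row's single def A, and the row is assumed to de-obfuscate to consistent argument names:

# 10 = 0b1010 and 6 = 0b0110; XOR-ing character-wise gives 0b1100 (decimal 12).
assert binary_xor(10, 6) == "0b1100"
# The shorter operand is zero-filled before the character-wise comparison.
assert binary_xor(1, 0) == "0b1"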
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy _UpperCAmelCase : str = logging.getLogger(__name__) _UpperCAmelCase : List[str] = "pytorch_model.bin" @dataclasses.dataclass class lowercase : __lowercase : str = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) __lowercase : Optional[str] = dataclasses.field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , ) @dataclasses.dataclass class lowercase : __lowercase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} ) __lowercase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} ) __lowercase : Optional[str] = dataclasses.field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "A csv or a json file containing the validation data."} ) __lowercase : Optional[str] = dataclasses.field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "The name of the task to train on."} , ) __lowercase : Optional[List[str]] = dataclasses.field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class lowercase : __lowercase : str = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) __lowercase : Optional[str] = dataclasses.field( default="accuracy" , metadata={"help": "The evaluation metric used for the task."} ) __lowercase : Optional[str] = dataclasses.field( default="no" , metadata={ "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]" } , ) __lowercase : Optional[int] = dataclasses.field( default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) __lowercase : Optional[float] = dataclasses.field( default=0.0 , metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
} , ) __lowercase : Optional[bool] = dataclasses.field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , ) __lowercase : Optional[bool] = dataclasses.field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , ) __lowercase : Optional[bool] = dataclasses.field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , ) __lowercase : Optional[float] = dataclasses.field( default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , ) __lowercase : Optional[int] = dataclasses.field( default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) __lowercase : Optional[int] = dataclasses.field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Random seed for initialization."} , ) def A ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: UpperCamelCase = dataset.filter(lambda lowercase : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 UpperCamelCase = int(eval_result * len(lowercase ) ) print(lowercase ) UpperCamelCase = dataset.sort('probability' , reverse=lowercase ) UpperCamelCase = dataset.select(range(lowercase ) ) UpperCamelCase = dataset.remove_columns(['label', 'probability'] ) UpperCamelCase = dataset.rename_column('prediction' , 'label' ) UpperCamelCase = dataset.map(lambda lowercase : {"label": idalabel[example["label"]]} ) UpperCamelCase = dataset.shuffle(seed=args.seed ) UpperCamelCase = os.path.join(lowercase , f'''train_pseudo.{args.data_file_extension}''' ) if args.data_file_extension == "csv": dataset.to_csv(lowercase , index=lowercase ) else: dataset.to_json(lowercase ) def A ( lowercase , lowercase , lowercase , lowercase , **lowercase ) -> int: '''simple docstring''' UpperCamelCase = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() UpperCamelCase = STModelArguments(model_name_or_path=lowercase ) UpperCamelCase = STDataArguments(train_file=lowercase , infer_file=lowercase ) UpperCamelCase = STTrainingArguments(output_dir=lowercase ) UpperCamelCase = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(lowercase ).items(): setattr(lowercase , lowercase , lowercase ) for key, value in kwargs.items(): if hasattr(lowercase , lowercase ): setattr(lowercase , lowercase , lowercase ) # Sanity checks UpperCamelCase = {} UpperCamelCase = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None UpperCamelCase = args.train_file UpperCamelCase = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None UpperCamelCase = args.eval_file for key in data_files: UpperCamelCase = data_files[key].split('.' )[-1] assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.''' if args.data_file_extension is None: UpperCamelCase = extension else: assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.''' assert ( args.eval_metric in datasets.list_metrics() ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.''' # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info('Creating the initial data directory for self-training...' 
) UpperCamelCase = f'''{args.output_dir}/self-train_iter-{{}}'''.format UpperCamelCase = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=lowercase ) os.makedirs(lowercase , exist_ok=lowercase ) accelerator.wait_for_everyone() UpperCamelCase = None UpperCamelCase = None UpperCamelCase = 0 UpperCamelCase = False # Show the progress bar UpperCamelCase = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): UpperCamelCase = data_dir_format(lowercase ) assert os.path.exists(lowercase ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 UpperCamelCase = os.path.join(lowercase , 'stage-1' ) UpperCamelCase = { 'accelerator': accelerator, 'model_name_or_path': args.model_name_or_path, 'cache_dir': args.cache_dir, 'do_train': True, 'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'], 'do_eval': True if args.eval_file is not None else False, 'eval_file': data_files['eval'], 'do_predict': True, 'infer_file': data_files['infer'], 'task_name': args.task_name, 'label_list': args.label_list, 'output_dir': current_output_dir, 'eval_metric': args.eval_metric, 'evaluation_strategy': args.evaluation_strategy, 'early_stopping_patience': args.early_stopping_patience, 'early_stopping_threshold': args.early_stopping_threshold, 'seed': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(lowercase , lowercase ): arguments_dict.update({key: value} ) UpperCamelCase = os.path.join(lowercase , 'best-checkpoint' , lowercase ) if os.path.exists(lowercase ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , lowercase , lowercase , ) else: logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , lowercase ) finetune(**lowercase ) accelerator.wait_for_everyone() assert os.path.exists(lowercase ) logger.info('Self-training job completed: iteration: %d, stage: 1.' , lowercase ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data UpperCamelCase = os.path.join(lowercase , 'best-checkpoint' ) UpperCamelCase = os.path.join(lowercase , 'stage-2' ) # Update arguments_dict UpperCamelCase = model_path UpperCamelCase = data_files['train'] UpperCamelCase = current_output_dir UpperCamelCase = os.path.join(lowercase , 'best-checkpoint' , lowercase ) if os.path.exists(lowercase ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , lowercase , lowercase , ) else: logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , lowercase ) finetune(**lowercase ) accelerator.wait_for_everyone() assert os.path.exists(lowercase ) logger.info('Self-training job completed: iteration: %d, stage: 2.' 
, lowercase ) UpperCamelCase = iteration UpperCamelCase = data_dir_format(iteration + 1 ) UpperCamelCase = AutoConfig.from_pretrained(os.path.join(lowercase , 'best-checkpoint' ) ) UpperCamelCase = config.idalabel UpperCamelCase = os.path.join(lowercase , 'eval_results_best-checkpoint.json' ) UpperCamelCase = os.path.join(lowercase , 'test_results_best-checkpoint.json' ) assert os.path.exists(lowercase ) with open(lowercase , 'r' ) as f: UpperCamelCase = float(json.load(lowercase )[args.eval_metric] ) UpperCamelCase = os.path.join(lowercase , 'infer_output_best-checkpoint.csv' ) assert os.path.exists(lowercase ) # Loading the dataset from local csv or json files. UpperCamelCase = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data'] UpperCamelCase = load_dataset('csv' , data_files={'data': infer_output_file} )['data'] if accelerator.is_main_process: os.makedirs(lowercase , exist_ok=lowercase ) shutil.copy(lowercase , os.path.join(lowercase , f'''eval_results_iter-{iteration}.json''' ) ) if os.path.exists(lowercase ): shutil.copy(lowercase , os.path.join(lowercase , f'''test_results_iter-{iteration}.json''' ) ) create_pseudo_labeled_data(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) accelerator.wait_for_everyone() UpperCamelCase = os.path.join(lowercase , f'''train_pseudo.{args.data_file_extension}''' ) if args.evaluation_strategy != IntervalStrategy.NO.value: UpperCamelCase = eval_result if best_iteration is None: UpperCamelCase = new_iteration UpperCamelCase = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: UpperCamelCase = new_iteration UpperCamelCase = new_eval_result UpperCamelCase = 0 else: if new_eval_result == best_eval_result: UpperCamelCase = new_iteration UpperCamelCase = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: UpperCamelCase = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info('Best iteration: %d' , lowercase ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , lowercase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(lowercase , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowercase , 'eval_results_best-iteration.json' ) , ) else: # Assume that the last iteration is the best logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , lowercase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(lowercase , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowercase , 'eval_results_best-iteration.json' ) , )
3
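A hedged invocation sketch for the self-training driver above. selftrain names the row's main def A(...) entry point by assumption, and every path and hyperparameter below is illustrative; the driver's own sanity checks require the train/infer (and, with evaluation enabled, eval) files to share one csv or json extension:

selftrain(
    "bert-base-uncased",   # model_name_or_path
    "data/train.csv",      # train_file
    "data/infer.csv",      # infer_file
    "output",              # output_dir
    eval_file="data/eval.csv",
    evaluation_strategy="epoch",
    max_selftrain_iterations=2,
)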
import re def A ( lowercase ) -> str: '''simple docstring''' if len(re.findall('[ATCG]' , lowercase ) ) != len(lowercase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
1
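A worked example for the DNA-complement row above, whose doctests were likewise stripped. It assumes the row de-obfuscates cleanly (the body refers to its argument as dna while the signature names it lowercase) and keeps the row's own function name A:

# str.translate swaps each base with its Watson-Crick partner (A<->T, C<->G).
assert A("ATCG") == "TAGC"
# Any character outside the ATCG alphabet is rejected before translation.
try:
    A("ATUCG")
except ValueError as err:
    assert str(err) == "Invalid Strand"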
def A ( lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = '' for i in table: res += inp[i - 1] return res def A ( lowercase ) -> Dict: '''simple docstring''' return data[1:] + data[0] def A ( lowercase , lowercase ) -> List[str]: '''simple docstring''' UpperCamelCase = '' for i in range(len(lowercase ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def A ( lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = int('0b' + data[0] + data[-1] , 2 ) UpperCamelCase = int('0b' + data[1:3] , 2 ) return bin(s[row][col] )[2:] def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase = message[:4] UpperCamelCase = message[4:] UpperCamelCase = apply_table(lowercase , lowercase ) UpperCamelCase = xor(lowercase , lowercase ) UpperCamelCase = apply_sbox(lowercase , temp[:4] ) # noqa: E741 UpperCamelCase = apply_sbox(lowercase , temp[4:] ) UpperCamelCase = '0' * (2 - len(lowercase )) + l # noqa: E741 UpperCamelCase = '0' * (2 - len(lowercase )) + r UpperCamelCase = apply_table(l + r , lowercase ) UpperCamelCase = xor(lowercase , lowercase ) return temp + right if __name__ == "__main__": _UpperCAmelCase : str = input("Enter 10 bit key: ") _UpperCAmelCase : str = input("Enter 8 bit message: ") _UpperCAmelCase : Optional[Any] = [6, 3, 7, 4, 8, 5, 10, 9] _UpperCAmelCase : Optional[Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] _UpperCAmelCase : List[Any] = [2, 4, 3, 1] _UpperCAmelCase : Any = [2, 6, 3, 1, 4, 8, 5, 7] _UpperCAmelCase : Dict = [4, 1, 3, 5, 7, 2, 8, 6] _UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1] _UpperCAmelCase : Optional[int] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] _UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation _UpperCAmelCase : str = apply_table(key, paa_table) _UpperCAmelCase : Tuple = temp[:5] _UpperCAmelCase : Optional[int] = temp[5:] _UpperCAmelCase : List[Any] = left_shift(left) _UpperCAmelCase : Union[str, Any] = left_shift(right) _UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table) _UpperCAmelCase : Optional[Any] = left_shift(left) _UpperCAmelCase : Optional[int] = left_shift(right) _UpperCAmelCase : Tuple = left_shift(left) _UpperCAmelCase : int = left_shift(right) _UpperCAmelCase : Optional[Any] = apply_table(left + right, pa_table) # encryption _UpperCAmelCase : int = apply_table(message, IP) _UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp) _UpperCAmelCase : Any = temp[4:] + temp[:4] _UpperCAmelCase : int = function(expansion, sa, sa, keya, temp) _UpperCAmelCase : Optional[Any] = apply_table(temp, IP_inv) print("Cipher text is:", CT) # decryption _UpperCAmelCase : List[str] = apply_table(CT, IP) _UpperCAmelCase : Optional[Any] = function(expansion, sa, sa, keya, temp) _UpperCAmelCase : Any = temp[4:] + temp[:4] _UpperCAmelCase : Any = function(expansion, sa, sa, keya, temp) _UpperCAmelCase : List[Any] = apply_table(temp, IP_inv) print("Plain text after decypting is:", PT)
3
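Two spot-checks for the S-DES helper functions above; apply_table and left_shift are the names the row's own key-generation section uses at the call sites, and clean de-obfuscation of the parameter names is assumed:

# apply_table re-orders a bit-string by the 1-based positions in the table.
assert apply_table("1010", [2, 4, 3, 1]) == "0011"
# left_shift is a one-position circular rotation, applied twice per round key.
assert left_shift("10000") == "00001"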
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Dict = (DDPMScheduler,) def __UpperCamelCase ( self , **A_ ) -> Dict: """simple docstring""" UpperCamelCase = { 'num_train_timesteps': 1_000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**A_ ) return config def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=A_ ) def __UpperCamelCase ( self ) -> str: """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=A_ , beta_end=A_ ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=A_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=A_ ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual UpperCamelCase = model(A_ , A_ ) # 2. 
predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(A_ ) ) UpperCamelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' ) UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual UpperCamelCase = model(A_ , A_ ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(A_ ) ) UpperCamelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=A_ ) UpperCamelCase = scheduler.timesteps for i, timestep in enumerate(A_ ): if i == len(A_ ) - 1: UpperCamelCase = -1 else: UpperCamelCase = timesteps[i + 1] UpperCamelCase = scheduler.previous_timestep(A_ ) UpperCamelCase = prev_t.item() self.assertEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(A_ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 1, 0] UpperCamelCase = len(A_ ) with self.assertRaises(A_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ ) def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( A_ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=A_ )
3
1
def A ( lowercase ) -> int: '''simple docstring''' stooge(lowercase , 0 , len(lowercase ) - 1 ) return arr def A ( lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' if i >= h: return # If the first element is greater than the last, swap them if arr[i] > arr[h]: UpperCamelCase , UpperCamelCase = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: UpperCamelCase = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(lowercase , lowercase , (h - t) ) # Recursively sort last 2/3 elements stooge(lowercase , i + t , (lowercase) ) # Recursively sort first 2/3 elements stooge(lowercase , lowercase , (h - t) ) if __name__ == "__main__": _UpperCAmelCase : int = input("Enter numbers separated by a comma:\n").strip() _UpperCAmelCase : Optional[Any] = [int(item) for item in user_input.split(",")] print(stooge_sort(unsorted))
3
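The stooge-sort row above recurses on an overlapping 2/3-sized prefix, then suffix, then prefix again, giving the recurrence T(n) = 3*T(2n/3) + O(1) and hence T(n) = O(n^(log_{3/2} 3)), roughly O(n^2.71), slower than the quadratic sorts. A quick sanity check, assuming the row's pre-obfuscation names and the call-site name stooge_sort from its own __main__ block:

# The sort is in-place and returns the same (now sorted) list object.
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
assert stooge_sort([]) == []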
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: _UpperCAmelCase : List[str] = None _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _UpperCAmelCase : List[str] = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } _UpperCAmelCase : Optional[int] = { "camembert-base": 512, } _UpperCAmelCase : Union[str, Any] = "▁" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : str = VOCAB_FILES_NAMES __lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : List[str] = ["input_ids", "attention_mask"] __lowercase : Tuple = CamembertTokenizer def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , ) UpperCamelCase = vocab_file UpperCamelCase = False if not self.vocab_file else True def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
3
1
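For the CamembertTokenizerFast row above, the special-token layout follows directly from build_inputs_with_special_tokens and create_token_type_ids_from_sequences:

# Single sequence:  <s> A </s>
# Sequence pair:    <s> A </s></s> B </s>   (RoBERTa-style doubled separator)
# Token type ids are all zeros in both cases: CamemBERT, like RoBERTa,
# does not make use of segment embeddings.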
from ...processing_utils import ProcessorMixin class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : str = ["image_processor", "feature_extractor"] __lowercase : List[Any] = "TvltImageProcessor" __lowercase : Union[str, Any] = "TvltFeatureExtractor" def __init__( self , A_ , A_ ) -> Dict: """simple docstring""" super().__init__(image_processor=A_ , feature_extractor=A_ ) UpperCamelCase = image_processor UpperCamelCase = feature_extractor def __call__( self , A_=None , A_=None , A_=None , A_=None , A_=False , A_=False , *A_ , **A_ , ) -> Optional[Any]: """simple docstring""" if images is None and audio is None: raise ValueError('You need to specify either an `images` or `audio` input to process.' ) UpperCamelCase = None if images is not None: UpperCamelCase = self.image_processor(A_ , mask_pixel=A_ , *A_ , **A_ ) if images_mixed is not None: UpperCamelCase = self.image_processor(A_ , is_mixed=A_ , *A_ , **A_ ) if audio is not None: UpperCamelCase = self.feature_extractor( A_ , *A_ , sampling_rate=A_ , mask_audio=A_ , **A_ ) UpperCamelCase = {} if audio is not None: output_dict.update(A_ ) if images is not None: output_dict.update(A_ ) if images_mixed_dict is not None: output_dict.update(A_ ) return output_dict @property def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.image_processor.model_input_names UpperCamelCase = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
3
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCAmelCase : Union[str, Any] = { "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Dict = [ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys _UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
3
1
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowercase : def __init__( self , A_ , A_=3 , A_=32 , A_=3 , A_=10 , A_=[8, 16, 32, 64] , A_=[1, 1, 2, 1] , A_=True , A_=True , A_="relu" , A_=3 , A_=None , A_=["stage2", "stage3", "stage4"] , A_=[2, 3, 4] , A_=1 , ) -> Tuple: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(A_ ) UpperCamelCase = out_features UpperCamelCase = out_indices UpperCamelCase = num_groups def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Any: """simple docstring""" UpperCamelCase = BitModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = BitForImageClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Dict: """simple docstring""" UpperCamelCase = BitBackbone(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = BitBackbone(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ ) # verify feature maps 
self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : List[Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __lowercase : Any = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) __lowercase : Any = False __lowercase : Any = False __lowercase : int = False __lowercase : Dict = False __lowercase : Any = False def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = BitModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCamelCase ( self ) -> Dict: """simple docstring""" return @unittest.skip(reason='Bit does not output attentions' ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" pass @unittest.skip(reason='Bit does not use inputs_embeds' ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='Bit does not support input and output embeddings' ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" pass def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(A_ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*A_ ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(config=A_ ) for name, module in model.named_modules(): if isinstance(A_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , 
msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" def check_hidden_states_output(A_ , A_ , A_ ): UpperCamelCase = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(A_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ['preactivation', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(A_ , A_ , A_ ) @unittest.skip(reason='Bit does not use feedforward chunking' ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" pass def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = BitModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def A ( ) -> Tuple: '''simple docstring''' UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase ( unittest.TestCase ): @cached_property def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A_ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**A_ ) # verify the logits UpperCamelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , A_ ) UpperCamelCase = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) ) @require_torch class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : List[Any] = (BitBackbone,) if is_torch_available() else () __lowercase : List[Any] = BitConfig __lowercase : int = False def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = BitModelTester(self )
3
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] = { "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json", } class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Dict = "data2vec-text" def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any: """simple docstring""" super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = hidden_act UpperCamelCase = intermediate_size UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = position_embedding_type UpperCamelCase = use_cache UpperCamelCase = classifier_dropout class lowercase ( _SCREAMING_SNAKE_CASE ): @property def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: UpperCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
3
1
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging _UpperCAmelCase : Optional[int] = logging.get_logger(__name__) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Any = ["audio_values", "audio_mask"] def __init__( self , A_=2_048 , A_=1 , A_=[16, 16] , A_=128 , A_=44_100 , A_=86 , A_=2_048 , A_=0.0 , **A_ , ) -> List[str]: """simple docstring""" super().__init__( feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , ) UpperCamelCase = spectrogram_length UpperCamelCase = num_channels UpperCamelCase = patch_size UpperCamelCase = feature_size // self.patch_size[1] UpperCamelCase = n_fft UpperCamelCase = sampling_rate // hop_length_to_sampling_rate UpperCamelCase = sampling_rate UpperCamelCase = padding_value UpperCamelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=A_ , norm='slaney' , mel_scale='slaney' , ).T def __UpperCamelCase ( self , A_ ) -> np.ndarray: """simple docstring""" UpperCamelCase = spectrogram( A_ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , ) UpperCamelCase = log_spec[:, :-1] UpperCamelCase = log_spec - 20.0 UpperCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) UpperCamelCase = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) UpperCamelCase = is_batched_numpy or ( isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A_ , np.ndarray ): UpperCamelCase = np.asarray(A_ , dtype=np.floataa ) elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis UpperCamelCase = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , A_ ): UpperCamelCase = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask UpperCamelCase = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: UpperCamelCase = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] UpperCamelCase = np.array(A_ ).astype(np.floataa ) # convert into correct format for padding UpperCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch UpperCamelCase = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) UpperCamelCase = padded_audio_features * self.padding_value for i in range(len(A_ ) ): UpperCamelCase = audio_features[i] UpperCamelCase = feature # return as BatchFeature if return_attention_mask: UpperCamelCase = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: UpperCamelCase = {'audio_values': padded_audio_features} UpperCamelCase = BatchFeature(data=A_ , tensor_type=A_ ) return encoded_inputs
3
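A hedged usage sketch for the TVLT audio feature-extractor row above (again class `lowercase` in this dump); the keyword names are pre-obfuscation assumptions and the clip is illustrative:

import numpy as np

fe = lowercase(sampling_rate=44_100)                       # assumed keyword name
clip = np.zeros(44_100, dtype=np.float32)                  # 1 s of silence
out = fe(clip, sampling_rate=44_100, return_tensors="np")
# Log-mel spectrogram patches padded to the longest clip in the batch, plus a
# patch-level mask, matching the row's declared model inputs.
print(out["audio_values"].shape, out["audio_mask"].shape)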
from random import shuffle import tensorflow as tf from numpy import array def A ( lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = int(lowercase ) assert noofclusters < len(lowercase ) # Find out the dimensionality UpperCamelCase = len(vectors[0] ) # Will help select random centroids from among the available vectors UpperCamelCase = list(range(len(lowercase ) ) ) shuffle(lowercase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. UpperCamelCase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION UpperCamelCase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points UpperCamelCase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase ) ] ##These nodes will assign the centroid Variables the appropriate ##values UpperCamelCase = tf.placeholder('float64' , [dim] ) UpperCamelCase = [] for centroid in centroids: cent_assigns.append(tf.assign(lowercase , lowercase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) UpperCamelCase = [tf.Variable(0 ) for i in range(len(lowercase ) )] ##These nodes will assign an assignment Variable the appropriate ##value UpperCamelCase = tf.placeholder('int32' ) UpperCamelCase = [] for assignment in assignments: cluster_assigns.append(tf.assign(lowercase , lowercase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input UpperCamelCase = tf.placeholder('float' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors UpperCamelCase = tf.reduce_mean(lowercase , 0 ) ##Node for computing Euclidean distances # Placeholders for input UpperCamelCase = tf.placeholder('float' , [dim] ) UpperCamelCase = tf.placeholder('float' , [dim] ) UpperCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase , lowercase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input UpperCamelCase = tf.placeholder('float' , [noofclusters] ) UpperCamelCase = tf.argmin(lowercase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. UpperCamelCase = tf.initialize_all_variables() # Initialize all variables sess.run(lowercase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. UpperCamelCase = 100 for _ in range(lowercase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(lowercase ) ): UpperCamelCase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. 
UpperCamelCase = [ sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input UpperCamelCase = sess.run( lowercase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(lowercase ): # Collect all the vectors assigned to this cluster UpperCamelCase = [ vectors[i] for i in range(len(lowercase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location UpperCamelCase = sess.run( lowercase , feed_dict={mean_input: array(lowercase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments UpperCamelCase = sess.run(lowercase ) UpperCamelCase = sess.run(lowercase ) return centroids, assignments
3
1
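A usage sketch for the TensorFlow k-means row above. Note the row targets the TF 1.x graph API (tf.Session, tf.placeholder, tf.initialize_all_variables, and even the pre-1.0 name tf.sub), so it will not run on TF 2.x without compat shims; clean de-obfuscation of the internal names is assumed and the toy points are illustrative:

from numpy import array

# Two well-separated clusters; the row's entry point is its top-level def A,
# which takes the list of vectors and the desired number of clusters.
points = [array([1.0, 1.0]), array([1.2, 0.8]), array([9.0, 9.0]), array([8.8, 9.2])]
centroids, assignments = A(points, 2)
print(centroids)     # final centroid locations
print(assignments)   # cluster index for each input vector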
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def A ( lowercase=32 , lowercase=10 , lowercase=100 , lowercase=1_026 , lowercase=True , lowercase="data/tokenized_stories_train_wikitext103.jbl" , lowercase="igf_context_pairs.jbl" , ) -> str: '''simple docstring''' set_seed(3 ) # generate train_data and objective_set UpperCamelCase , UpperCamelCase = generate_datasets( lowercase , lowercase , number=lowercase , min_len=1_026 , trim=lowercase ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? UpperCamelCase = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' ) # load pretrained model UpperCamelCase = load_gpta('gpt2' ).to(lowercase ) print('computing perplexity on objective set' ) UpperCamelCase = compute_perplexity(lowercase , lowercase , lowercase ).item() print('perplexity on objective set:' , lowercase ) # collect igf pairs and save to file demo.jbl collect_objective_set(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def A ( lowercase , lowercase=15 , lowercase=128 , lowercase=100 , lowercase="igf_model.pt" , ) -> Optional[Any]: '''simple docstring''' set_seed(42 ) # Load pre-trained model UpperCamelCase = GPTaLMHeadModel.from_pretrained('gpt2' ) # Initialize secondary learner to use embedding weights of model UpperCamelCase = SecondaryLearner(lowercase ) # Train secondary learner UpperCamelCase = train_secondary_learner( lowercase , lowercase , max_epochs=lowercase , batch_size=lowercase , eval_freq=100 , igf_model_path=lowercase , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def A ( lowercase , lowercase , lowercase , lowercase=32 , lowercase=1_000 , lowercase=16 , lowercase=1.0 , lowercase=recopy_gpta , lowercase=None , lowercase=10 , lowercase="gpt2_finetuned.pt" , ) -> List[str]: '''simple docstring''' UpperCamelCase = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' ) UpperCamelCase = RandomSampler(lowercase ) UpperCamelCase = DataLoader(lowercase , sampler=lowercase ) UpperCamelCase = max_steps // (len(lowercase )) + 1 UpperCamelCase = 0 UpperCamelCase = torch.zeros((1, context_len) , dtype=torch.long , device=lowercase ) UpperCamelCase , UpperCamelCase , UpperCamelCase = recopy_model(lowercase , lowercase , lowercase ) model.train() if secondary_learner is not None: secondary_learner.to(lowercase ) secondary_learner.eval() UpperCamelCase = [] UpperCamelCase = 0 UpperCamelCase = [] UpperCamelCase = [] # Compute the performance of the transformer model at the beginning UpperCamelCase = compute_perplexity(lowercase , lowercase , lowercase ) test_perps.append(lowercase ) print('Test perplexity, step' , lowercase , ':' , lowercase ) for epoch in range(int(lowercase ) ): for step, example in enumerate(lowercase ): torch.cuda.empty_cache() UpperCamelCase = random.randint(0 , example.size(2 ) - context_len - 1 ) UpperCamelCase = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() UpperCamelCase = model(lowercase , labels=lowercase ) 
UpperCamelCase = True if secondary_learner is not None: UpperCamelCase = secondary_learner.forward( torch.tensor(lowercase , dtype=torch.long , device=lowercase ).unsqueeze(0 ) )[0].item() observed_qs.append(float(lowercase ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: UpperCamelCase = -1 if predicted_q < threshold: UpperCamelCase = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) UpperCamelCase = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() UpperCamelCase = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: UpperCamelCase = compute_perplexity(lowercase , lowercase , lowercase ) test_perps.append(lowercase ) print('Test perplexity, step' , lowercase , ':' , lowercase ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , lowercase ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def A ( ) -> str: '''simple docstring''' UpperCamelCase = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' ) # Required parameters parser.add_argument( '--data_dir' , default=lowercase , type=lowercase , required=lowercase , help='The input data dir. Should contain data files for WikiText.' , ) parser.add_argument( '--model_name_or_path' , default=lowercase , type=lowercase , required=lowercase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--data_file' , type=lowercase , default=lowercase , help=( 'A jbl file containing tokenized data which can be split as objective dataset, ' 'train_dataset and test_dataset.' ) , ) parser.add_argument( '--igf_data_file' , type=lowercase , default=lowercase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , ) parser.add_argument( '--output_dir' , default=lowercase , type=lowercase , required=lowercase , help='The output directory where the final fine-tuned model is stored.' , ) parser.add_argument( '--tokenizer_name' , default=lowercase , type=lowercase , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument('--seed' , type=lowercase , default=lowercase , help='A seed for reproducible training.' ) parser.add_argument( '--context_len' , default=32 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' 
) , ) parser.add_argument( '--size_objective_set' , default=100 , type=lowercase , help='number of articles that are long enough to be used as our objective set' , ) parser.add_argument( '--eval_freq' , default=100 , type=lowercase , help='secondary model evaluation is triggered at eval_freq' ) parser.add_argument('--max_steps' , default=1_000 , type=lowercase , help='To calculate training epochs' ) parser.add_argument( '--secondary_learner_batch_size' , default=128 , type=lowercase , help='batch size of training data for secondary learner' , ) parser.add_argument( '--batch_size' , default=16 , type=lowercase , help='batch size of training data of language model(gpt2) ' ) parser.add_argument( '--eval_interval' , default=10 , type=lowercase , help=( 'decay the selectivity of our secondary learner filter from' '1 standard deviation above average to 1 below average after 10 batches' ) , ) parser.add_argument( '--number' , default=100 , type=lowercase , help='The number of examples split to be used as objective_set/test_data' ) parser.add_argument( '--min_len' , default=1_026 , type=lowercase , help='The minimum length of the article to be used as objective set' ) parser.add_argument( '--secondary_learner_max_epochs' , default=15 , type=lowercase , help='number of epochs to train secondary learner' ) parser.add_argument('--trim' , default=lowercase , type=lowercase , help='truncate the example if it exceeds context length' ) parser.add_argument( '--threshold' , default=1.0 , type=lowercase , help=( 'The threshold value used by secondary learner to filter the train_data and allow only' ' informative data as input to the model' ) , ) parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=lowercase , help='finetuned_model_name' ) parser.add_argument( '--recopy_model' , default=lowercase , type=lowercase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=lowercase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , ) # Load train data for secondary learner UpperCamelCase = joblib.load('data/IGF_values.jbl' ) # Train secondary learner UpperCamelCase = training_secondary_learner( lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , ) # load pretrained gpt2 model UpperCamelCase = GPTaLMHeadModel.from_pretrained('gpt2' ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model UpperCamelCase , UpperCamelCase = generate_datasets( context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1_026 , trim=lowercase ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( lowercase , lowercase , lowercase , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=lowercase , secondary_learner=lowercase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , ) if __name__ == "__main__": main()
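A minimal, self-contained sketch of the IG(X) filtering step the training loop above applies per context; `keep_context` and the direct call on `secondary_learner` are illustrative stand-ins for the trained IGF secondary learner and its `forward`:

import torch

def keep_context(context: torch.Tensor, secondary_learner, threshold: float) -> bool:
    # Predict the information gain for this context; contexts scoring below
    # the current threshold are dropped instead of joining the LM batch.
    with torch.no_grad():
        predicted_q = secondary_learner(context.unsqueeze(0))[0].item()
    return predicted_q >= threshold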
3
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _UpperCAmelCase : Tuple = _symbol_database.Default() _UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile( b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) _UpperCAmelCase : int = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _UpperCAmelCase : int = None _UpperCAmelCase : List[str] = b"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _UpperCAmelCase : Optional[Any] = 45 _UpperCAmelCase : Any = 1_581 _UpperCAmelCase : Tuple = 1_517 _UpperCAmelCase : List[str] = 1_570 _UpperCAmelCase : int = 1_584 _UpperCAmelCase : List[Any] = 1_793 _UpperCAmelCase : Optional[int] = 1_795 _UpperCAmelCase : Any = 1_916 _UpperCAmelCase : Tuple = 1_864 _UpperCAmelCase : List[Any] = 1_905 _UpperCAmelCase : Union[str, Any] = 1_919 _UpperCAmelCase : str = 2_429 _UpperCAmelCase : Any = 2_208 _UpperCAmelCase : Dict = 2_418 _UpperCAmelCase : Optional[Any] = 2_323 _UpperCAmelCase : Tuple = 2_407 # @@protoc_insertion_point(module_scope)
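The serialized descriptor above builds the usual protobuf message classes under the module name "sentencepiece_model_pb2". A hedged usage sketch; the model file path is hypothetical:

import sentencepiece_model_pb2 as sp_pb2

proto = sp_pb2.ModelProto()
with open("spiece.model", "rb") as f:  # hypothetical path to a SentencePiece model
    proto.ParseFromString(f.read())
# Fields come straight from the descriptor above: TrainerSpec.vocab_size, ModelProto.pieces.
print(proto.trainer_spec.vocab_size, len(proto.pieces))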
3
1
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Convert an integer string (optionally signed) to its 0b-prefixed binary form."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
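A few example calls for the converter above (function names as reconstructed here):

assert binary_recursive(6) == "110"
assert main("7") == "0b111"
assert main("-11") == "-0b1011"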
3
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" # A mock response for an HTTP head request to emulate server down UpperCamelCase = mock.Mock() UpperCamelCase = 500 UpperCamelCase = {} UpperCamelCase = HTTPError UpperCamelCase = {} # Download this model to make sure it's in the cache. UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head: UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def __UpperCamelCase ( self ) -> Dict: """simple docstring""" # A mock response for an HTTP head request to emulate server down UpperCamelCase = mock.Mock() UpperCamelCase = 500 UpperCamelCase = {} UpperCamelCase = HTTPError UpperCamelCase = {} # Download this model to make sure it's in the cache. UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head: UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' ) # This check we did call the fake head request mock_head.assert_called() def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" # This test is for deprecated behavior and can be removed in v5 try: UpperCamelCase = tempfile.mktemp() with open(A_ , 'wb' ) as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ ) UpperCamelCase = AlbertTokenizer.from_pretrained(A_ ) finally: os.remove(A_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('tokenizer.json' , 'wb' ) as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ ) UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('tokenizer.json' ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" # This test is for deprecated behavior and can be removed in v5 UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ) @is_staging_test class lowercase ( unittest.TestCase ): __lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def __UpperCamelCase ( cls ) -> Tuple: """simple docstring""" UpperCamelCase = TOKEN HfFolder.save_token(A_ ) @classmethod def __UpperCamelCase ( cls ) -> Optional[int]: """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-tokenizer' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' ) except HTTPError: pass def __UpperCamelCase ( self ) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizer(A_ ) tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='test-tokenizer' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def __UpperCamelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizer(A_ ) tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def __UpperCamelCase ( self ) -> Dict: """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = CustomTokenizer(A_ ) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module 
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizerFast.from_pretrained(A_ ) bert_tokenizer.save_pretrained(A_ ) UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ ) tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' ) UpperCamelCase = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = Trie() trie.add('Hello 友達' ) self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} ) trie.add('Hello' ) trie.data self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] ) trie.add('[CLS]' ) trie.add('extra_id_1' ) trie.add('extra_id_100' ) self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = Trie() trie.add('A' ) self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] ) self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = Trie() trie.add('TOKEN]' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = Trie() trie.add('A' ) trie.add('P' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = Trie() trie.add('AB' ) trie.add('B' ) trie.add('C' ) self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = Trie() trie.add('ABC' ) trie.add('B' ) trie.add('CD' ) self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] ) def __UpperCamelCase ( self ) -> int: """simple docstring""" # Even if the offsets are wrong, we necessarily output correct string # parts. UpperCamelCase = Trie() UpperCamelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] ) self.assertEqual(A_ , ['AB', 'C'] )
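The `Trie` exercised by these tests can be used directly; a short sketch of the add/split API the assertions above rely on:

from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("[SEP]")
# Longest-match splitting around the added special tokens, spaces preserved.
print(trie.split("[CLS] hello [SEP]"))  # ['[CLS]', ' hello ', '[SEP]']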
3
1
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself (5 -> 25, 76 -> 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # Compare the trailing digits of the number and of its square.
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
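An equivalent string-based check, shown only to make the digit-by-digit loop above concrete; the name is illustrative:

def is_automorphic_str(n: int) -> bool:
    # n is automorphic when n**2 ends with the digits of n: 5 -> 25, 76 -> 5776.
    return n >= 0 and str(n * n).endswith(str(n))

assert is_automorphic_str(76) and not is_automorphic_str(7)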
3
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = features.copy() if features else default_expected_features UpperCamelCase = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def A ( lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if issubclass(lowercase , lowercase ): UpperCamelCase = parquet_path elif issubclass(lowercase , lowercase ): UpperCamelCase = [parquet_path] UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple: '''simple docstring''' assert isinstance(lowercase , lowercase ) for split in splits: UpperCamelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( lowercase , 
lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase = ParquetDatasetReader( {'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = features.copy() if features else default_expected_features UpperCamelCase = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if split: UpperCamelCase = {split: parquet_path} else: UpperCamelCase = 'train' UpperCamelCase = {'train': parquet_path, 'test': parquet_path} UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def A ( lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' ) assert writer.write() > 0 UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' ) UpperCamelCase = pf.read() assert dataset.data.table == output_table def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' ) UpperCamelCase = {'image': [image_path]} UpperCamelCase = Features({'image': Image()} ) UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase ) UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' ) assert writer.write() > 0 UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) ) assert dataset.features == reloaded_dataset.features UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( 'feature, expected' , [ (Features({'foo': Value('int32' )} ), None), (Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def A ( lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' assert get_writer_batch_size(lowercase ) == expected
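A hedged end-to-end sketch of the writer/reader pair these tests cover; the file path is hypothetical, and `write()` returning a positive value on success is exactly what the tests assert:

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
assert ParquetDatasetWriter(ds, "toy.parquet").write() > 0  # hypothetical path
reloaded = ParquetDatasetReader("toy.parquet").read()
assert reloaded.column_names == ["col_1", "col_2"]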
3
1
import numpy as np _UpperCAmelCase : Optional[Any] = [ ["a", "b", "c", "d", "e"], ["f", "g", "h", "i", "k"], ["l", "m", "n", "o", "p"], ["q", "r", "s", "t", "u"], ["v", "w", "x", "y", "z"], ] class lowercase : def __init__( self ) -> None: """simple docstring""" UpperCamelCase = np.array(A_ ) def __UpperCamelCase ( self , A_ ) -> np.ndarray: """simple docstring""" UpperCamelCase , UpperCamelCase = np.where(letter == self.SQUARE ) UpperCamelCase = np.concatenate([indexa + 1, indexa + 1] ) return indexes def __UpperCamelCase ( self , A_ , A_ ) -> str: """simple docstring""" UpperCamelCase = self.SQUARE[indexa - 1, indexa - 1] return letter def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" UpperCamelCase = message.lower() UpperCamelCase = message.replace(' ' , '' ) UpperCamelCase = message.replace('j' , 'i' ) UpperCamelCase = np.empty((2, len(A_ )) ) for letter_index in range(len(A_ ) ): UpperCamelCase = self.letter_to_numbers(message[letter_index] ) UpperCamelCase = numbers[0] UpperCamelCase = numbers[1] UpperCamelCase = first_step.reshape(2 * len(A_ ) ) UpperCamelCase = '' for numbers_index in range(len(A_ ) ): UpperCamelCase = int(second_step[numbers_index * 2] ) UpperCamelCase = int(second_step[(numbers_index * 2) + 1] ) UpperCamelCase = self.numbers_to_letter(A_ , A_ ) UpperCamelCase = encoded_message + letter return encoded_message def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" UpperCamelCase = message.lower() message.replace(' ' , '' ) UpperCamelCase = np.empty(2 * len(A_ ) ) for letter_index in range(len(A_ ) ): UpperCamelCase = self.letter_to_numbers(message[letter_index] ) UpperCamelCase = numbers[0] UpperCamelCase = numbers[1] UpperCamelCase = first_step.reshape((2, len(A_ )) ) UpperCamelCase = '' for numbers_index in range(len(A_ ) ): UpperCamelCase = int(second_step[0, numbers_index] ) UpperCamelCase = int(second_step[1, numbers_index] ) UpperCamelCase = self.numbers_to_letter(A_ , A_ ) UpperCamelCase = decoded_message + letter return decoded_message
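A standalone sketch of the Polybius-square lookup the cipher above is built on (5x5 grid with "j" folded into "i"); the helper name is illustrative, mirroring the renamed method:

import numpy as np

SQUARE = np.array([
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
])

def letter_to_numbers(letter: str) -> tuple[int, int]:
    # np.where on the 2D grid returns (row_indices, col_indices); 1-based here.
    row, col = np.where(SQUARE == letter)
    return int(row[0]) + 1, int(col[0]) + 1

assert letter_to_numbers("a") == (1, 1)
assert letter_to_numbers("k") == (2, 5)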
3
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Tuple: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize UpperCamelCase = size if size is not None else {'height': 18, 'width': 20} UpperCamelCase = do_thumbnail UpperCamelCase = do_align_axis UpperCamelCase = do_pad UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : Optional[int] = DonutImageProcessor if is_vision_available() else None def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = DonutImageProcessingTester(self ) @property def __UpperCamelCase ( self ) -> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , 'do_resize' ) ) self.assertTrue(hasattr(A_ , 'size' ) ) self.assertTrue(hasattr(A_ , 'do_thumbnail' ) ) self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) ) self.assertTrue(hasattr(A_ , 'do_pad' ) ) self.assertTrue(hasattr(A_ , 'do_normalize' ) ) self.assertTrue(hasattr(A_ , 'image_mean' ) ) self.assertTrue(hasattr(A_ , 'image_std' ) ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def __UpperCamelCase ( self ) -> int: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 
1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def __UpperCamelCase ( self ) -> Any: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
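The size handling these tests pin down reduces to one helper; a sketch matching the asserted behavior (int becomes a square, a legacy (width, height) tuple becomes a dict) with an illustrative name:

def normalize_size(size):
    if isinstance(size, int):
        return {"height": size, "width": size}
    if isinstance(size, (tuple, list)):  # legacy (width, height) order
        width, height = size
        return {"height": height, "width": width}
    return dict(size)

assert normalize_size(42) == {"height": 42, "width": 42}
assert normalize_size((42, 84)) == {"height": 84, "width": 42}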
3
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
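The guard pattern above, reduced to its core: probe for the optional backend and degrade gracefully. Names below are illustrative, not part of the library:

def backend_available() -> bool:
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

if backend_available():
    import torch
else:
    torch = None  # callers must check for None before use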
3
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Dict = logging.get_logger(__name__) _UpperCAmelCase : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } _UpperCAmelCase : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } _UpperCAmelCase : List[str] = {"facebook/blenderbot_small-90M": 512} def A ( lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char UpperCamelCase = set(lowercase ) return pairs class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Optional[Any] = VOCAB_FILES_NAMES __lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Any = ["input_ids", "attention_mask"] def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , ) -> List[Any]: """simple docstring""" super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ ) with open(A_ , encoding='utf-8' ) as vocab_handle: UpperCamelCase = json.load(A_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: UpperCamelCase = merges_handle.read().split('\n' )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in merges] UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) ) UpperCamelCase = {} @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return len(self.encoder ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = re.sub('([.,!?()])' , r' \1' , A_ ) UpperCamelCase = re.sub('(\')' , r' \1 ' , A_ ) UpperCamelCase = re.sub(r'\s{2,}' , ' ' , A_ ) if "\n" in token: UpperCamelCase = token.replace('\n' , ' __newln__' ) UpperCamelCase = token.split(' ' ) UpperCamelCase = [] for token in tokens: if not len(A_ ): continue UpperCamelCase = token.lower() UpperCamelCase = tuple(A_ ) UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) UpperCamelCase = get_pairs(A_ ) if not pairs: words.append(A_ ) continue while True: UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(A_ ): try: UpperCamelCase = word.index(A_ , A_ ) new_word.extend(word[i:j] ) UpperCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(A_ ) UpperCamelCase = new_word if len(A_ ) == 1: break else: UpperCamelCase = get_pairs(A_ ) UpperCamelCase = '@@ '.join(A_ ) UpperCamelCase = 
word[:-4] UpperCamelCase = word words.append(A_ ) return " ".join(A_ ) def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = re.findall(r'\S+\n?' , A_ ) for token in words: split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) ) return split_tokens def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" UpperCamelCase = token.lower() return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" return self.decoder.get(A_ , self.unk_token ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" UpperCamelCase = ' '.join(A_ ).replace('@@ ' , '' ).strip() return out_string def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' ) UpperCamelCase = 0 with open(A_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) UpperCamelCase = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file
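A toy trace of the get_pairs/merge loop at the heart of the BPE tokenizer above; the merge table and word are made up:

def get_pairs(word: tuple) -> set:
    # All adjacent symbol pairs in the current word.
    return {(a, b) for a, b in zip(word, word[1:])}

word = ("l", "o", "w", "e", "r</w>")
bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}
while True:
    pairs = get_pairs(word)
    bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
    if bigram not in bpe_ranks:
        break
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
assert word == ("low", "e", "r</w>")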
3
1
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : List[Any] = ["input_values", "padding_mask"] def __init__( self , A_ = 1 , A_ = 24_000 , A_ = 0.0 , A_ = None , A_ = None , **A_ , ) -> Union[str, Any]: """simple docstring""" super().__init__(feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ ) UpperCamelCase = chunk_length_s UpperCamelCase = overlap @property def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self , A_ , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs UpperCamelCase = True UpperCamelCase = bool( isinstance(A_ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase = [np.asarray(A_ , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(A_ , np.ndarray ): UpperCamelCase = np.asarray(A_ , dtype=np.floataa ) elif isinstance(A_ , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): UpperCamelCase = raw_audio.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase = [np.asarray(A_ ).T] # verify inputs are valid for idx, example in enumerate(A_ ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) UpperCamelCase = None UpperCamelCase = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: UpperCamelCase = min(array.shape[0] for array in raw_audio ) UpperCamelCase = int(np.floor(max_length / self.chunk_stride ) ) UpperCamelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: UpperCamelCase = max(array.shape[0] for array in raw_audio ) UpperCamelCase = int(np.ceil(max_length / self.chunk_stride ) ) UpperCamelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length UpperCamelCase = 'max_length' else: UpperCamelCase = input_values # normal padding on batch if padded_inputs is None: UpperCamelCase = self.pad( A_ , max_length=A_ , truncation=A_ , padding=A_ , return_attention_mask=A_ , ) if padding: UpperCamelCase = padded_inputs.pop('attention_mask' ) UpperCamelCase = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: UpperCamelCase = example[..., None] input_values.append(example.T ) UpperCamelCase = input_values if return_tensors is not None: UpperCamelCase = padded_inputs.convert_to_tensors(A_ ) return padded_inputs
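The chunking arithmetic behind `chunk_length` and `chunk_stride` in isolation, with illustrative values (24 kHz audio, 1 s chunks, 25% overlap):

sampling_rate = 24_000
chunk_length_s = 1.0
overlap = 0.25

chunk_length = int(chunk_length_s * sampling_rate)           # 24000 samples per chunk
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))   # 18000 samples between chunk starts
assert (chunk_length, chunk_stride) == (24_000, 18_000)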
3
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Convert an integer string (optionally signed) to its 0b-prefixed binary form."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
3
1
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration _UpperCAmelCase : Tuple = [ # tf -> hf ("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("beta", "bias"), ("gamma", "weight"), ("pegasus", "model"), ] _UpperCAmelCase : Any = [ (".output.dense", ".fc2"), ("intermediate.LayerNorm", "final_layer_norm"), ("intermediate.dense", "fc1"), ] _UpperCAmelCase : str = ( INIT_COMMON + [ ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.out_proj"), ("attention.self", "self_attn"), ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"), ("attention.encdec_output.dense", "encoder_attn.out_proj"), ("attention.encdec", "encoder_attn"), ("key", "k_proj"), ("value", "v_proj"), ("query", "q_proj"), ("decoder.LayerNorm", "decoder.layernorm_embedding"), ] + END_COMMON ) _UpperCAmelCase : Tuple = ( INIT_COMMON + [ ("embeddings.word_embeddings", "shared.weight"), ("embeddings.position_embeddings", "embed_positions.weight"), ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.output"), ("attention.self", "self_attn.self"), ("encoder.LayerNorm", "encoder.layernorm_embedding"), ] + END_COMMON ) _UpperCAmelCase : List[Any] = [ "encdec/key/bias", "encdec/query/bias", "encdec/value/bias", "self/key/bias", "self/query/bias", "self/value/bias", "encdec_output/dense/bias", "attention/output/dense/bias", ] def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' for tf_name, hf_name in patterns: UpperCamelCase = k.replace(lowercase , lowercase ) return k def A ( lowercase , lowercase ) -> BigBirdPegasusForConditionalGeneration: '''simple docstring''' UpperCamelCase = BigBirdPegasusConfig(**lowercase ) UpperCamelCase = BigBirdPegasusForConditionalGeneration(lowercase ) UpperCamelCase = torch_model.state_dict() UpperCamelCase = {} # separating decoder weights UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )} UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )} for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ): UpperCamelCase = [k.endswith(lowercase ) for ending in KEYS_TO_IGNORE] if any(lowercase ): continue UpperCamelCase = DECODER_PATTERNS UpperCamelCase = rename_state_dict_key(lowercase , lowercase ) if new_k not in state_dict: raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): UpperCamelCase = v.T UpperCamelCase = torch.from_numpy(lowercase ) assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ): UpperCamelCase = [k.endswith(lowercase ) for ending in KEYS_TO_IGNORE] if any(lowercase ): continue UpperCamelCase = REMAINING_PATTERNS UpperCamelCase = rename_state_dict_key(lowercase , lowercase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): UpperCamelCase = v.T UpperCamelCase = torch.from_numpy(lowercase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' UpperCamelCase = mapping['model.embed_positions.weight'] UpperCamelCase = mapping.pop('model.embed_positions.weight' ) UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(lowercase , strict=lowercase ) UpperCamelCase = [ k for k in missing if k not in [ 'final_logits_bias', 'model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight', 'lm_head.weight', ] ] assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], f'''no matches found for the following tf keys {extra}''' return torch_model def A ( lowercase ) -> Dict: '''simple docstring''' UpperCamelCase = tf.train.list_variables(lowercase ) UpperCamelCase = {} UpperCamelCase = ['global_step'] for name, shape in tqdm(lowercase , desc='converting tf checkpoint to dict' ): UpperCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue UpperCamelCase = tf.train.load_variable(lowercase , lowercase ) UpperCamelCase = array return tf_weights def A ( lowercase , lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = get_tf_weights_as_numpy(lowercase ) UpperCamelCase = convert_bigbird_pegasus(lowercase , lowercase ) torch_model.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables") parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.") _UpperCAmelCase : str = parser.parse_args() _UpperCAmelCase : Optional[Any] = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
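The substitution chain performed by the key-renaming helper above, in isolation with a toy pattern list:

def rename_state_dict_key(k: str, patterns) -> str:
    # Apply each (tf_name, hf_name) substitution in order.
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k

patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
assert rename_state_dict_key("encoder/layer_0/kernel", patterns) == "encoder.layers.0.weight"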
3
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Tuple = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k", "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v", "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q", "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u", "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v", "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out", "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos", "self_attn.rotary_emb": "encoder.embed_positions", "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm", "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1", "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2", "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv", "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm", "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm", "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense", "ffn1.w_2": "encoder.layers.*.ffn1.output_dense", "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm", "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense", "ffn2.w_2": "encoder.layers.*.ffn2.output_dense", "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _UpperCAmelCase : Any = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' for attribute in key.split('.' ): UpperCamelCase = getattr(lowercase , lowercase ) if weight_type is not None: UpperCamelCase = getattr(lowercase , lowercase ).shape else: UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCamelCase = value elif weight_type == "weight_g": UpperCamelCase = value elif weight_type == "weight_v": UpperCamelCase = value elif weight_type == "bias": UpperCamelCase = value elif weight_type == "running_mean": UpperCamelCase = value elif weight_type == "running_var": UpperCamelCase = value elif weight_type == "num_batches_tracked": UpperCamelCase = value elif weight_type == "inv_freq": UpperCamelCase = value else: UpperCamelCase = value logger.info(f'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def A ( lowercase , lowercase , lowercase ) -> Any: '''simple docstring''' UpperCamelCase = [] UpperCamelCase = fairseq_model.state_dict() UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: UpperCamelCase = True if "*" in mapped_key: UpperCamelCase = name.split(lowercase )[0].split('.' )[-2] UpperCamelCase = mapped_key.replace('*' , lowercase ) if "pos_bias_u" in name: UpperCamelCase = None elif "pos_bias_v" in name: UpperCamelCase = None elif "weight_g" in name: UpperCamelCase = 'weight_g' elif "weight_v" in name: UpperCamelCase = 'weight_v' elif "bias" in name: UpperCamelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase = 'weight' elif "running_mean" in name: UpperCamelCase = 'running_mean' elif "inv_freq" in name: UpperCamelCase = 'inv_freq' elif "running_var" in name: UpperCamelCase = 'running_var' elif "num_batches_tracked" in name: UpperCamelCase = 'num_batches_tracked' else: UpperCamelCase = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = full_name.split('conv_layers.' )[-1] UpperCamelCase = name.split('.' 
) UpperCamelCase = int(items[0] ) UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase ) @torch.no_grad() def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int: '''simple docstring''' if config_path is not None: UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' ) else: UpperCamelCase = WavaVecaConformerConfig() if "rope" in checkpoint_path: UpperCamelCase = 'rotary' if is_finetuned: if dict_path: UpperCamelCase = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase = target_dict.pad_index UpperCamelCase = target_dict.bos_index UpperCamelCase = target_dict.eos_index UpperCamelCase = len(target_dict.symbols ) UpperCamelCase = os.path.join(lowercase , 'vocab.json' ) if not os.path.isdir(lowercase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase = 0 UpperCamelCase = 1 with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(lowercase , lowercase ) UpperCamelCase = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , ) UpperCamelCase = True if config.feat_extract_norm == 'layer' else False UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) UpperCamelCase = WavaVecaConformerForCTC(lowercase ) else: UpperCamelCase = 
WavaVecaConformerForPreTraining(lowercase ) if is_finetuned: UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: UpperCamelCase = argparse.Namespace(task='audio_pretraining' ) UpperCamelCase = fairseq.tasks.setup_task(lowercase ) UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase ) UpperCamelCase = model[0].eval() recursively_load_weights(lowercase , lowercase , not is_finetuned ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCAmelCase : Dict = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
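The wildcard entries in MAPPING (e.g. encoder.layers.*.self_attn.linear_k) are resolved per weight: the fairseq name is split on the matched key, and the second-to-last dot-component of the prefix is the layer index that replaces the "*". A small standalone illustration of that substitution, with an assumed example name:

name = "w2v_model.encoder.layers.3.self_attn.linear_k.weight"
key = "self_attn.linear_k"
layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
mapped = "encoder.layers.*.self_attn.linear_k".replace("*", layer_index)
assert mapped == "encoder.layers.3.self_attn.linear_k"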
3
1
from PIL import Image def A ( lowercase , lowercase ) -> Image: '''simple docstring''' UpperCamelCase = (259 * (level + 255)) / (255 * (259 - level)) def contrast(lowercase ) -> int: return int(128 + factor * (c - 128) ) return img.point(lowercase ) if __name__ == "__main__": # Load image with Image.open("image_data/lena.jpg") as img: # Change contrast to 170 _UpperCAmelCase : Dict = change_contrast(img, 170) cont_img.save("image_data/lena_high_contrast.png", format="png")
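The point() callback applies the standard contrast formula: factor = 259 * (level + 255) / (255 * (259 - level)), with each channel remapped as 128 + factor * (c - 128), i.e. stretched around mid-gray. A quick sanity check of the arithmetic, no image required:

factor = (259 * (0 + 255)) / (255 * (259 - 0))
assert factor == 1.0                           # level 0 is the identity mapping
assert int(128 + factor * (200 - 128)) == 200
factor = (259 * (170 + 255)) / (255 * (259 - 170))
assert factor > 1.0                            # level 170 stretches contrast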
3
from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets _UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n" _UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n" _UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def A ( lowercase , lowercase ) -> List[str]: '''simple docstring''' return float((preds == labels).mean() ) def A ( lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = simple_accuracy(lowercase , lowercase ) UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) ) return { "accuracy": acc, "f1": fa, } def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] ) UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' 
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , ) def __UpperCamelCase ( self , A_ , A_ ) -> Any: """simple docstring""" if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(A_ , A_ )} elif self.config_name == "stsb": return pearson_and_spearman(A_ , A_ ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(A_ , A_ ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(A_ , A_ )} else: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
3
1
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def A ( lowercase , lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value') UpperCamelCase = ( ('layer.', 'layer_'), ('word_embeddings.weight', 'word_embeddings'), ('position_embeddings.weight', 'position_embeddings'), ('token_type_embeddings.weight', 'token_type_embeddings'), ('.', '/'), ('LayerNorm/weight', 'LayerNorm/gamma'), ('LayerNorm/bias', 'LayerNorm/beta'), ('weight', 'kernel'), ) if not os.path.isdir(lowercase ): os.makedirs(lowercase ) UpperCamelCase = model.state_dict() def to_tf_var_name(lowercase ): for patt, repl in iter(lowercase ): UpperCamelCase = name.replace(lowercase , lowercase ) return f'''bert/{name}''' def create_tf_var(lowercase , lowercase , lowercase ): UpperCamelCase = tf.dtypes.as_dtype(tensor.dtype ) UpperCamelCase = tf.get_variable(dtype=lowercase , shape=tensor.shape , name=lowercase , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(lowercase ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCamelCase = to_tf_var_name(lowercase ) UpperCamelCase = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): UpperCamelCase = torch_tensor.T UpperCamelCase = create_tf_var(tensor=lowercase , name=lowercase , session=lowercase ) tf.keras.backend.set_value(lowercase , lowercase ) UpperCamelCase = session.run(lowercase ) print(f'''Successfully created {tf_name}: {np.allclose(lowercase , lowercase )}''' ) UpperCamelCase = tf.train.Saver(tf.trainable_variables() ) saver.save(lowercase , os.path.join(lowercase , model_name.replace('-' , '_' ) + '.ckpt' ) ) def A ( lowercase=None ) -> List[str]: '''simple docstring''' UpperCamelCase = argparse.ArgumentParser() parser.add_argument('--model_name' , type=lowercase , required=lowercase , help='model name e.g. bert-base-uncased' ) parser.add_argument( '--cache_dir' , type=lowercase , default=lowercase , required=lowercase , help='Directory containing pytorch model' ) parser.add_argument('--pytorch_model_path' , type=lowercase , required=lowercase , help='/path/to/<pytorch-model-name>.bin' ) parser.add_argument('--tf_cache_dir' , type=lowercase , required=lowercase , help='Directory in which to save tensorflow model' ) UpperCamelCase = parser.parse_args(lowercase ) UpperCamelCase = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=lowercase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
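The tuple of (pattern, replacement) pairs above is applied in order by to_tf_var_name; a standalone restatement showing one full name translation (the example key is assumed):

def to_tf_name(name: str) -> str:
    for patt, repl in (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    ):
        name = name.replace(patt, repl)
    return f"bert/{name}"

assert to_tf_name("encoder.layer.0.attention.self.query.weight") == (
    "bert/encoder/layer_0/attention/self/query/kernel"
)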
3
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _UpperCAmelCase : str = "scheduler_config.json" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Tuple = 1 __lowercase : int = 2 __lowercase : List[Any] = 3 __lowercase : str = 4 __lowercase : Optional[Any] = 5 @dataclass class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : jnp.ndarray class lowercase : __lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME __lowercase : Dict = ["dtype"] __lowercase : List[Any] = [] __lowercase : Dict = True @classmethod def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = cls.load_config( pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , ) UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ ) if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ): UpperCamelCase = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str: """simple docstring""" self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ ) @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return self._get_compatibles() @classmethod def __UpperCamelCase ( cls ) -> int: """simple docstring""" UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) ) UpperCamelCase = importlib.import_module(__name__.split('.' )[0] ) UpperCamelCase = [ getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ ) ] return compatible_classes def A ( lowercase , lowercase ) -> jnp.ndarray: '''simple docstring''' assert len(lowercase ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase ) def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray: '''simple docstring''' def alpha_bar(lowercase ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 UpperCamelCase = [] for i in range(lowercase ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) ) return jnp.array(lowercase , dtype=lowercase ) @flax.struct.dataclass class lowercase : __lowercase : jnp.ndarray __lowercase : jnp.ndarray __lowercase : jnp.ndarray @classmethod def __UpperCamelCase ( cls , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = scheduler.config if config.trained_betas is not None: UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
UpperCamelCase = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) UpperCamelCase = 1.0 - betas UpperCamelCase = jnp.cumprod(A_ , axis=0 ) return cls( alphas=A_ , betas=A_ , alphas_cumprod=A_ , ) def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = state.alphas_cumprod UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def A ( lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
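The add_noise path at the bottom implements the usual forward-diffusion rule x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, with alpha_bar taken from the cumulative product built in create(). A scalar jax sketch of the same rule (schedule values are illustrative):

import jax.numpy as jnp

betas = jnp.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = jnp.cumprod(1.0 - betas)
t = 10
x0, eps = jnp.asarray(1.0), jnp.asarray(0.5)
x_t = jnp.sqrt(alphas_cumprod[t]) * x0 + jnp.sqrt(1 - alphas_cumprod[t]) * eps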
3
1
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _UpperCAmelCase : str = "scheduler_config.json" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Tuple = 1 __lowercase : int = 2 __lowercase : List[Any] = 3 __lowercase : str = 4 __lowercase : Optional[Any] = 5 @dataclass class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : jnp.ndarray class lowercase : __lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME __lowercase : Dict = ["dtype"] __lowercase : List[Any] = [] __lowercase : Dict = True @classmethod def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = cls.load_config( pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , ) UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ ) if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ): UpperCamelCase = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str: """simple docstring""" self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ ) @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return self._get_compatibles() @classmethod def __UpperCamelCase ( cls ) -> int: """simple docstring""" UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) ) UpperCamelCase = importlib.import_module(__name__.split('.' )[0] ) UpperCamelCase = [ getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ ) ] return compatible_classes def A ( lowercase , lowercase ) -> jnp.ndarray: '''simple docstring''' assert len(lowercase ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase ) def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray: '''simple docstring''' def alpha_bar(lowercase ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 UpperCamelCase = [] for i in range(lowercase ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) ) return jnp.array(lowercase , dtype=lowercase ) @flax.struct.dataclass class lowercase : __lowercase : jnp.ndarray __lowercase : jnp.ndarray __lowercase : jnp.ndarray @classmethod def __UpperCamelCase ( cls , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = scheduler.config if config.trained_betas is not None: UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
UpperCamelCase = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) UpperCamelCase = 1.0 - betas UpperCamelCase = jnp.cumprod(A_ , axis=0 ) return cls( alphas=A_ , betas=A_ , alphas_cumprod=A_ , ) def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = state.alphas_cumprod UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def A ( lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
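The get_velocity helper at the end defines the v-prediction target v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0; combined with the add_noise rule, the clean sample is recoverable as x_0 = sqrt(alpha_bar_t) * x_t - sqrt(1 - alpha_bar_t) * v. A scalar check of that identity:

import jax.numpy as jnp

a = jnp.asarray(0.9)                          # alpha_bar_t (illustrative)
sa, s1ma = jnp.sqrt(a), jnp.sqrt(1 - a)
x0, eps = jnp.asarray(2.0), jnp.asarray(0.5)
x_t = sa * x0 + s1ma * eps                    # add_noise
v = sa * eps - s1ma * x0                      # get_velocity
assert jnp.allclose(sa * x_t - s1ma * v, x0)  # clean sample recovered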
3
from abc import ABC, abstractmethod from typing import List, Optional class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self ) -> Optional[Any]: """simple docstring""" # test for the above condition self.test() def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = 0 UpperCamelCase = False while not completed: if counter == 1: self.reset() UpperCamelCase = self.advance() if not self.does_advance(A_ ): raise Exception( 'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' ) UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ ) counter += 1 if counter > 10_000: raise Exception('update() does not fulfill the constraint.' ) if self.remaining() != 0: raise Exception('Custom Constraint is not defined correctly.' ) @abstractmethod def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> Any: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> Any: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCamelCase = token_ids UpperCamelCase = len(self.token_ids ) UpperCamelCase = -1 # the index of the currently fulfilled step UpperCamelCase = False def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.fulfilled_idx += 1 UpperCamelCase = True if self.fulfilled_idx == (self.seqlen - 1): UpperCamelCase = True UpperCamelCase = completed else: # failed to make progress. 
UpperCamelCase = True self.reset() return stepped, completed, reset def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = False UpperCamelCase = 0 def __UpperCamelCase ( self ) -> int: """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = PhrasalConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.fulfilled_idx UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ , A_=True ) -> List[Any]: """simple docstring""" UpperCamelCase = max([len(A_ ) for one in nested_token_ids] ) UpperCamelCase = {} for token_ids in nested_token_ids: UpperCamelCase = root for tidx, token_id in enumerate(A_ ): if token_id not in level: UpperCamelCase = {} UpperCamelCase = level[token_id] if no_subsets and self.has_subsets(A_ , A_ ): raise ValueError( 'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is' F''' {nested_token_ids}.''' ) UpperCamelCase = root def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.trie for current_token in current_seq: UpperCamelCase = start[current_token] UpperCamelCase = list(start.keys() ) return next_tokens def __UpperCamelCase ( self , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.next_tokens(A_ ) return len(A_ ) == 0 def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = list(root.values() ) if len(A_ ) == 0: return 1 else: return sum([self.count_leaves(A_ ) for nn in next_nodes] ) def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.count_leaves(A_ ) return len(A_ ) != leaf_count class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> str: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCamelCase = DisjunctiveTrie(A_ ) UpperCamelCase = nested_token_ids UpperCamelCase = self.trie.max_height UpperCamelCase = [] UpperCamelCase = False def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.trie.next_tokens(self.current_seq ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.current_seq.append(A_ ) UpperCamelCase = True else: UpperCamelCase = True self.reset() 
UpperCamelCase = self.trie.reached_leaf(self.current_seq ) UpperCamelCase = completed return stepped, completed, reset def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = False UpperCamelCase = [] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" UpperCamelCase = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.current_seq UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ ) -> Tuple: """simple docstring""" UpperCamelCase = constraints # max # of steps required to fulfill a given constraint UpperCamelCase = max([c.seqlen for c in constraints] ) UpperCamelCase = len(A_ ) UpperCamelCase = False self.init_state() def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = None UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCamelCase = constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) else: UpperCamelCase = self.inprogress_constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Any: """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCamelCase , UpperCamelCase = self.add(A_ ) # the entire list of constraints are fulfilled if self.completed: break def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCamelCase , UpperCamelCase = False, False if self.completed: UpperCamelCase = True UpperCamelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) ) UpperCamelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. 
self.complete_constraints.append(self.inprogress_constraint ) UpperCamelCase = None if len(self.pending_constraints ) == 0: # we're done! UpperCamelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(A_ ): UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ ) if not stepped: raise Exception( '`constraint.update(token_id)` is not yielding incremental progress, ' 'even though `constraint.does_advance(token_id)` is true.' ) if complete: self.complete_constraints.append(A_ ) UpperCamelCase = None if not complete and stepped: UpperCamelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCamelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCamelCase = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __UpperCamelCase ( self , A_=True ) -> Tuple: """simple docstring""" UpperCamelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCamelCase = [ constraint.copy(stateful=A_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ ) UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
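These classes mirror transformers' constrained-generation API (PhrasalConstraint, DisjunctiveConstraint, ConstraintListState). Assuming the public transformers names, a phrasal constraint steps through its token ids one update() call at a time and reports completion:

from transformers import PhrasalConstraint

c = PhrasalConstraint([5, 7, 9])
for token_id in (5, 7, 9):
    stepped, completed, reset = c.update(token_id)
assert completed and c.remaining() == 0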
3
1
from __future__ import annotations from functools import lru_cache from math import ceil _UpperCAmelCase : List[Any] = 100 _UpperCAmelCase : List[str] = set(range(3, NUM_PRIMES, 2)) primes.add(2) _UpperCAmelCase : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def A ( lowercase ) -> set[int]: '''simple docstring''' if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} UpperCamelCase = set() UpperCamelCase = 42 UpperCamelCase = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A ( lowercase = 5_000 ) -> int | None: '''simple docstring''' for number_to_partition in range(1 , lowercase ): if len(partition(lowercase ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F'''{solution() = }''')
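partition(n) encodes every multiset of primes summing to n as the product of its elements, so len(partition(n)) counts prime partitions (Project Euler 77; the problem statement notes that 10 has exactly five). A self-contained mini version of the same recursion:

from functools import lru_cache

@lru_cache(maxsize=None)
def prime_partition_products(n):
    # Each multiset of primes summing to n is keyed by its product.
    if n == 0:
        return frozenset({1})
    out = set()
    for p in (2, 3, 5, 7):  # primes up to 10 suffice for this demo
        if p <= n:
            out |= {p * sub for sub in prime_partition_products(n - p)}
    return frozenset(out)

assert prime_partition_products(5) == {5, 6}   # 5 and 2 + 3
assert len(prime_partition_products(10)) == 5  # five prime partitions of 10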
3
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): @register_to_config def __init__( self , A_ , A_ = None , A_ = None ) -> Any: """simple docstring""" super().__init__() UpperCamelCase = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCamelCase = torch.zeros(A_ , A_ ) else: UpperCamelCase = None UpperCamelCase = torch.nn.Parameter(A_ ) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : VQModel __lowercase : CLIPTextModel __lowercase : CLIPTokenizer __lowercase : TransformeraDModel __lowercase : LearnedClassifierFreeSamplingEmbeddings __lowercase : VQDiffusionScheduler def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]: """simple docstring""" super().__init__() self.register_modules( vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1 # get prompt text embeddings UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) UpperCamelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length] UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate text embeddings for each generation per prompt UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 ) else: UpperCamelCase = [''] * batch_size UpperCamelCase = text_input_ids.shape[-1] UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , ) UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase = negative_prompt_embeds.shape[1] UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 ) UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" if isinstance(A_ , A_ ): UpperCamelCase = 1 elif isinstance(A_ , A_ ): UpperCamelCase = len(A_ ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' ) UpperCamelCase = batch_size * num_images_per_prompt UpperCamelCase = guidance_scale > 1.0 UpperCamelCase = self._encode_prompt(A_ , A_ , A_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(A_ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCamelCase = (batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCamelCase = self.transformer.num_vector_embeds - 1 UpperCamelCase = torch.full(A_ , A_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( 'Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,' F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCamelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(A_ , device=self.device ) UpperCamelCase = self.scheduler.timesteps.to(self.device ) UpperCamelCase = latents for i, t in enumerate(self.progress_bar(A_ ) ): # expand the sample if we are doing classifier free guidance UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample if do_classifier_free_guidance: UpperCamelCase , UpperCamelCase = model_output.chunk(2 ) UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ ) UpperCamelCase = self.truncate(A_ , A_ ) # remove `log(0)`'s (`-inf`s) UpperCamelCase = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A_ , A_ , A_ ) UpperCamelCase = self.vqvae.config.vq_embed_dim UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ ) UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ ) def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor: """simple docstring""" UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ ) UpperCamelCase = torch.exp(A_ ) UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ ) UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 ) UpperCamelCase = keep_mask[:, :-1, :] UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) ) UpperCamelCase = log_p_x_0.clone() UpperCamelCase = -torch.inf # -inf = log(0) return rv
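truncate() keeps, per sample, the smallest set of classes whose probability mass reaches truncation_rate (always including the top class) and sends everything else to log(0); the full implementation additionally scatters the mask back to the original class order via argsort. A simplified torch sketch of the keep-mask construction on an already-sorted distribution:

import torch

log_p = torch.log(torch.tensor([[0.5, 0.3, 0.15, 0.05]]))
sorted_lp, idx = torch.sort(log_p, dim=-1, descending=True)
keep = torch.exp(sorted_lp).cumsum(dim=-1) < 0.75  # truncation_rate = 0.75
keep = torch.cat([torch.ones_like(keep[..., :1]), keep[..., :-1]], dim=-1)
assert keep.tolist() == [[True, True, False, False]]  # 0.5 and 0.3 survive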
3
1
from __future__ import annotations _UpperCAmelCase : Any = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] _UpperCAmelCase : Any = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def A ( lowercase ) -> list[float]: '''simple docstring''' UpperCamelCase = [] UpperCamelCase = len(lowercase ) for i in range(lowercase ): UpperCamelCase = -1 for j in range(i + 1 , lowercase ): if arr[i] < arr[j]: UpperCamelCase = arr[j] break result.append(lowercase ) return result def A ( lowercase ) -> list[float]: '''simple docstring''' UpperCamelCase = [] for i, outer in enumerate(lowercase ): UpperCamelCase = -1 for inner in arr[i + 1 :]: if outer < inner: UpperCamelCase = inner break result.append(lowercase ) return result def A ( lowercase ) -> list[float]: '''simple docstring''' UpperCamelCase = len(lowercase ) UpperCamelCase = [] UpperCamelCase = [-1] * arr_size for index in reversed(range(lowercase ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: UpperCamelCase = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) _UpperCAmelCase : List[str] = ( "from __main__ import arr, next_greatest_element_slow, " "next_greatest_element_fast, next_greatest_element" ) print( "next_greatest_element_slow():", timeit("next_greatest_element_slow(arr)", setup=setup), ) print( "next_greatest_element_fast():", timeit("next_greatest_element_fast(arr)", setup=setup), ) print( " next_greatest_element():", timeit("next_greatest_element(arr)", setup=setup), )
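The third variant is the classic monotonic-stack solution: each element is pushed and popped at most once, so it runs in O(n) versus O(n^2) for the two nested-loop versions. Using the original function names referenced in the __main__ block, a tiny check that all three agree:

sample = [2, 1, 5]
assert next_greatest_element_slow(sample) == [5, 5, -1]
assert next_greatest_element_fast(sample) == [5, 5, -1]
assert next_greatest_element(sample) == [5, 5, -1]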
3
from string import ascii_uppercase _UpperCAmelCase : Dict = {char: i for i, char in enumerate(ascii_uppercase)} _UpperCAmelCase : Tuple = dict(enumerate(ascii_uppercase)) def A ( lowercase , lowercase ) -> str: '''simple docstring''' UpperCamelCase = len(lowercase ) UpperCamelCase = 0 while True: if x == i: UpperCamelCase = 0 if len(lowercase ) == len(lowercase ): break key += key[i] i += 1 return key def A ( lowercase , lowercase ) -> str: '''simple docstring''' UpperCamelCase = '' UpperCamelCase = 0 for letter in message: if letter == " ": cipher_text += " " else: UpperCamelCase = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def A ( lowercase , lowercase ) -> str: '''simple docstring''' UpperCamelCase = '' UpperCamelCase = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: UpperCamelCase = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def A ( ) -> None: '''simple docstring''' UpperCamelCase = 'THE GERMAN ATTACK' UpperCamelCase = 'SECRET' UpperCamelCase = generate_key(lowercase , lowercase ) UpperCamelCase = cipher_text(lowercase , lowercase ) print(f'''Encrypted Text = {s}''' ) print(f'''Original Text = {original_text(lowercase , lowercase )}''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
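Note the sign convention: this running-key variant encrypts with c = (p - k) mod 26 and decrypts with p = (c + k) mod 26, the mirror image of textbook Vigenere (which adds on encryption). The pair is self-inverse, as a round trip shows (original function names from the main() above assumed):

message, keyword = "THE GERMAN ATTACK", "SECRET"
key = generate_key(message, keyword)
assert original_text(cipher_text(message, key), key) == message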
3
1
import qiskit def A ( lowercase = 2 ) -> qiskit.result.counts.Counts: '''simple docstring''' UpperCamelCase = qubits # Using Aer's simulator UpperCamelCase = qiskit.Aer.get_backend('aer_simulator' ) # Creating a Quantum Circuit acting on the q register UpperCamelCase = qiskit.QuantumCircuit(lowercase , lowercase ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , lowercase ): # Adding CX (CNOT) gate circuit.cx(i - 1 , lowercase ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(lowercase ) ) , list(range(lowercase ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator UpperCamelCase = qiskit.execute(lowercase , lowercase , shots=1_000 ) return job.result().get_counts(lowercase ) if __name__ == "__main__": print(F'''Total count for various states are: {quantum_entanglement(3)}''')
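The circuit is the standard GHZ construction (Hadamard on qubit 0, then a CNOT chain), preparing (|0...0> + |1...1>)/sqrt(2); on the noiseless Aer simulator every shot therefore lands on the all-zeros or all-ones bitstring, roughly 500 each out of 1,000. A quick check, using the function name referenced in the __main__ block:

counts = quantum_entanglement(3)
assert set(counts) <= {"000", "111"}  # only the two GHZ bitstrings appear
assert sum(counts.values()) == 1_000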
3
from collections.abc import Callable def A ( lowercase , lowercase , lowercase ) -> float: '''simple docstring''' UpperCamelCase = a UpperCamelCase = b if function(lowercase ) == 0: # one of the a or b is a root for the function return a elif function(lowercase ) == 0: return b elif ( function(lowercase ) * function(lowercase ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: UpperCamelCase = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(lowercase ) == 0: return mid elif function(lowercase ) * function(lowercase ) < 0: UpperCamelCase = mid else: UpperCamelCase = mid UpperCamelCase = start + (end - start) / 2.0 return mid def A ( lowercase ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
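Bisection halves the bracket on each pass, so reaching the 1e-7 stopping width from [1, 1000] takes about ceil(log2(999 / 1e-7)) = 34 iterations; the root of x^3 - 2x - 5 on that interval is Wallis's classic example, approximately 2.0945515. Using the names from the __main__ block:

import math

assert math.ceil(math.log2((1_000 - 1) / 1e-7)) == 34
assert abs(bisection(f, 1, 1_000) - 2.0945515) < 1e-6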
3
1
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def A ( lowercase , lowercase ) -> str: '''simple docstring''' UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' UpperCamelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert('RGB' ) UpperCamelCase = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ), ] ) UpperCamelCase = transform(lowercase ).unsqueeze(0 ).to(lowercase ) return image def A ( lowercase ) -> Union[str, Any]: '''simple docstring''' if "visual_encoder" in key: UpperCamelCase = re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase ) if "blocks" in key: UpperCamelCase = re.sub(R'blocks' , 'layers' , lowercase ) if "attn" in key: UpperCamelCase = re.sub(R'attn' , 'self_attn' , lowercase ) if "norm1" in key: UpperCamelCase = re.sub(R'norm1' , 'layer_norm1' , lowercase ) if "norm2" in key: UpperCamelCase = re.sub(R'norm2' , 'layer_norm2' , lowercase ) if "encoder.norm" in key: UpperCamelCase = re.sub(R'encoder.norm' , 'post_layernorm' , lowercase ) if "encoder.patch_embed.proj" in key: UpperCamelCase = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase ) if "encoder.pos_embed" in key: UpperCamelCase = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase ) if "encoder.cls_token" in key: UpperCamelCase = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase ) if "self_attn" in key: UpperCamelCase = re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase ) return key @torch.no_grad() def A ( lowercase , lowercase=None ) -> int: '''simple docstring''' if config_path is not None: UpperCamelCase = BlipConfig.from_pretrained(lowercase ) else: UpperCamelCase = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) UpperCamelCase = BlipForConditionalGeneration(lowercase ).eval() UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' UpperCamelCase = blip_decoder(pretrained=lowercase , image_size=384 , vit='base' ) UpperCamelCase = pt_model.eval() UpperCamelCase = pt_model.state_dict() for key in modified_state_dict.copy(): UpperCamelCase = modified_state_dict.pop(lowercase ) UpperCamelCase = rename_key(lowercase ) UpperCamelCase = value hf_model.load_state_dict(lowercase ) UpperCamelCase = 384 UpperCamelCase = load_demo_image(image_size=lowercase , device='cpu' ) UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' ) UpperCamelCase = tokenizer(['a picture of'] ).input_ids UpperCamelCase = hf_model.generate(lowercase , lowercase ) assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102] UpperCamelCase = hf_model.generate(lowercase ) assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102] if 
pytorch_dump_folder_path is not None: hf_model.save_pretrained(lowercase ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' UpperCamelCase = ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) UpperCamelCase = blip_vqa(pretrained=lowercase , image_size=lowercase , vit='base' ) vqa_model.eval() UpperCamelCase = vqa_model.state_dict() for key in modified_state_dict.copy(): UpperCamelCase = modified_state_dict.pop(lowercase ) UpperCamelCase = rename_key(lowercase ) UpperCamelCase = value UpperCamelCase = BlipForQuestionAnswering(lowercase ) hf_vqa_model.load_state_dict(lowercase ) UpperCamelCase = ['How many dogs are in this image?'] UpperCamelCase = tokenizer(lowercase , return_tensors='pt' ).input_ids UpperCamelCase = hf_vqa_model.generate(lowercase , lowercase ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' UpperCamelCase = blip_itm(pretrained=lowercase , image_size=lowercase , vit='base' ) itm_model.eval() UpperCamelCase = itm_model.state_dict() for key in modified_state_dict.copy(): UpperCamelCase = modified_state_dict.pop(lowercase ) UpperCamelCase = rename_key(lowercase ) UpperCamelCase = value UpperCamelCase = BlipForImageTextRetrieval(lowercase ) UpperCamelCase = ['A picture of a woman with a dog sitting in a beach'] UpperCamelCase = tokenizer( lowercase , return_tensors='pt' , padding='max_length' , truncation=lowercase , max_length=35 , ).input_ids hf_itm_model.load_state_dict(lowercase ) hf_itm_model.eval() UpperCamelCase = hf_itm_model(lowercase , lowercase , use_itm_head=lowercase ) UpperCamelCase = hf_itm_model(lowercase , lowercase , use_itm_head=lowercase ) assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") _UpperCAmelCase : Optional[int] = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
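One inconsistency worth fixing here: the final call passes args.checkpoint_path as the converter's first argument, but the parser above never registers that flag (and the converter as written accepts only two parameters). A hedged sketch of the assumed missing registration:

parser.add_argument(
    "--checkpoint_path", default=None, type=str, help="Path to the original BLIP checkpoint"
)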
3
import os _UpperCAmelCase : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000} def A ( lowercase ) -> int: '''simple docstring''' UpperCamelCase = 0 UpperCamelCase = 0 while index < len(lowercase ) - 1: UpperCamelCase = SYMBOLS[numerals[index]] UpperCamelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def A ( lowercase ) -> str: '''simple docstring''' UpperCamelCase = '' UpperCamelCase = num // 1_000 numerals += m_count * "M" num %= 1_000 UpperCamelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 UpperCamelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def A ( lowercase = "/p089_roman.txt" ) -> int: '''simple docstring''' UpperCamelCase = 0 with open(os.path.dirname(lowercase ) + roman_numerals_filename ) as filea: UpperCamelCase = filea.readlines() for line in lines: UpperCamelCase = line.strip() UpperCamelCase = parse_roman_numerals(lowercase ) UpperCamelCase = generate_roman_numerals(lowercase ) savings += len(lowercase ) - len(lowercase ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
3
1
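The checkpoint-conversion code in the row above is built around ordered string/regex substitutions over state-dict keys. Below is a minimal, self-contained sketch of that pattern; the rules and helper names (RENAME_RULES, rename_key, rename_state_dict) are illustrative, not the actual BLIP mapping.

import re
from collections import OrderedDict

# Illustrative rules only; the real mapping in the script above is larger.
RENAME_RULES = [
    (r"^visual_encoder", "vision_model.encoder"),
    (r"\.blocks\.", ".layers."),
    (r"\.attn\.", ".self_attn."),
    (r"\.norm1\.", ".layer_norm1."),
    (r"\.norm2\.", ".layer_norm2."),
]

def rename_key(key):
    # Apply each substitution in order; later rules see earlier rewrites.
    for pattern, replacement in RENAME_RULES:
        key = re.sub(pattern, replacement, key)
    return key

def rename_state_dict(state_dict):
    # Pop every entry and re-insert it under its converted name.
    renamed = OrderedDict()
    for key in list(state_dict):
        renamed[rename_key(key)] = state_dict.pop(key)
    return renamed

print(rename_state_dict({"visual_encoder.blocks.0.attn.qkv.weight": 0}))
# OrderedDict([('vision_model.encoder.layers.0.self_attn.qkv.weight', 0)])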
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def A ( lowercase , lowercase , lowercase , lowercase ) -> str: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def A ( lowercase , lowercase , lowercase , lowercase , lowercase=True ) -> List[Any]: '''simple docstring''' model.train() UpperCamelCase = model(lowercase ) UpperCamelCase = F.mse_loss(lowercase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowercase ) def A ( lowercase , lowercase=False ) -> Union[str, Any]: '''simple docstring''' set_seed(42 ) UpperCamelCase = RegressionModel() UpperCamelCase = deepcopy(lowercase ) UpperCamelCase = RegressionDataset(length=80 ) UpperCamelCase = DataLoader(lowercase , batch_size=16 ) model.to(accelerator.device ) if sched: UpperCamelCase = AdamW(params=model.parameters() , lr=1e-3 ) UpperCamelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 ) UpperCamelCase = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.6_5 ) UpperCamelCase = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.6_5 ) # Make a copy of `model` if sched: UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(lowercase , lowercase , lowercase , lowercase ) else: UpperCamelCase , UpperCamelCase = accelerator.prepare(lowercase , lowercase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def A ( lowercase ) -> str: '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase ) # Use a single batch UpperCamelCase , UpperCamelCase = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowercase , lowercase , lowercase , lowercase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: 
continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) UpperCamelCase = ddp_input[torch.randperm(len(lowercase ) )] def A ( lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase ) # Use a single batch UpperCamelCase , UpperCamelCase = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) UpperCamelCase = ddp_input[torch.randperm(len(lowercase ) )] def A ( lowercase=False , lowercase=False ) -> str: '''simple docstring''' UpperCamelCase = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase ) for iteration, batch in enumerate(lowercase ): UpperCamelCase , UpperCamelCase = batch.values() # Gather the distributed inputs and targs for the base model UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' 
# Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) UpperCamelCase = ddp_input[torch.randperm(len(lowercase ) )] GradientState._reset_state() def A ( lowercase=False , lowercase=False ) -> List[str]: '''simple docstring''' UpperCamelCase = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase , lowercase ) for iteration, batch in enumerate(lowercase ): UpperCamelCase , UpperCamelCase = batch.values() # Gather the distributed inputs and targs for the base model UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' UpperCamelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase )) if accelerator.num_processes > 1: check_model_parameters(lowercase , lowercase , lowercase , lowercase ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def A ( ) -> int: '''simple docstring''' UpperCamelCase = Accelerator() UpperCamelCase = RegressionDataset(length=80 ) UpperCamelCase = DataLoader(lowercase , batch_size=16 ) UpperCamelCase = RegressionDataset(length=96 ) UpperCamelCase = DataLoader(lowercase , batch_size=16 ) UpperCamelCase , UpperCamelCase = accelerator.prepare(lowercase , lowercase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if iteration < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if batch_num < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def A ( ) -> Optional[int]: '''simple docstring''' UpperCamelCase = Accelerator() UpperCamelCase = accelerator.state if state.local_process_index == 0: print('**Test `accumulate` gradient accumulation with dataloader break**' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('**Test NOOP `no_sync` context manager**' ) test_noop_sync(lowercase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: 
print('**Test Distributed `no_sync` context manager**' ) test_distributed_sync(lowercase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation, ' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(lowercase , lowercase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase ) def A ( lowercase ) -> Any: '''simple docstring''' main() if __name__ == "__main__": main()
3
import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase ) UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: UpperCamelCase = dataset_size < in_memory_max_size else: UpperCamelCase = False UpperCamelCase = is_small_dataset(lowercase ) assert result == expected
3
1
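The Accelerate tests in the row above assert that accumulated micro-batch gradients match full-batch gradients. A plain-PyTorch sketch of that invariant, assuming mean-reduced MSE loss and equal-sized chunks (the case where the identity holds exactly):

import torch

torch.manual_seed(0)
model_a = torch.nn.Linear(4, 1)
model_b = torch.nn.Linear(4, 1)
model_b.load_state_dict(model_a.state_dict())  # identical starting weights

x, y = torch.randn(8, 4), torch.randn(8, 1)

# One full-batch backward on model_a.
torch.nn.functional.mse_loss(model_a(x), y).backward()

# Two half-batch backwards on model_b, each loss scaled by 1/num_chunks.
for chunk_x, chunk_y in zip(x.chunk(2), y.chunk(2)):
    (torch.nn.functional.mse_loss(model_b(chunk_x), chunk_y) / 2).backward()

# Accumulated micro-batch grads equal the full-batch grads.
for p_a, p_b in zip(model_a.parameters(), model_b.parameters()):
    assert torch.allclose(p_a.grad, p_b.grad, atol=1e-6)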
from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets _UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n" _UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n" _UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def A ( lowercase , lowercase ) -> List[str]: '''simple docstring''' return float((preds == labels).mean() ) def A ( lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = simple_accuracy(lowercase , lowercase ) UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) ) return { "accuracy": acc, "f1": fa, } def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] ) UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' 
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , ) def __UpperCamelCase ( self , A_ , A_ ) -> Any: """simple docstring""" if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(A_ , A_ )} elif self.config_name == "stsb": return pearson_and_spearman(A_ , A_ ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(A_ , A_ ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(A_ , A_ )} else: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
3
def A ( lowercase , lowercase ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b" UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b" UpperCamelCase = max(len(lowercase ) , len(lowercase ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
1
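The XOR helper in the row above pads both binary strings to a common width and emits 1 wherever the digits differ. A compact restatement with a hypothetical name (binary_xor), checked against the ^ operator, which gives the same result directly for ints:

def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("both inputs must be non-negative")
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bin), len(b_bin))
    bits = "".join(
        str(int(ca != cb))  # 1 where the zero-padded digits differ
        for ca, cb in zip(a_bin.zfill(width), b_bin.zfill(width))
    )
    return "0b" + bits

assert binary_xor(25, 32) == bin(25 ^ 32)  # both give '0b111001'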
from math import factorial def A ( lowercase = 20 ) -> int: '''simple docstring''' UpperCamelCase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... UpperCamelCase = n // 2 return int(factorial(lowercase ) / (factorial(lowercase ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: _UpperCAmelCase : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print("Invalid entry - please enter a number.")
3
import re def A ( lowercase ) -> str: '''simple docstring''' if len(re.findall('[ATCG]' , lowercase ) ) != len(lowercase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
1
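The factorial expression in the row above counts monotonic lattice paths through an n x n grid, i.e. the central binomial coefficient C(2n, n). The same value via math.comb (Python 3.8+), cross-checked against the factorial form:

from math import comb, factorial

def lattice_paths(n: int) -> int:
    # Choose which n of the 2n steps go right; the rest go down.
    return comb(2 * n, n)

assert lattice_paths(2) == 6  # the classic 2x2 grid example
assert lattice_paths(20) == factorial(40) // (factorial(20) ** 2)
assert lattice_paths(20) == 137_846_528_820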
from __future__ import annotations from math import pow, sqrt def A ( lowercase , lowercase , lowercase ) -> dict[str, float]: '''simple docstring''' if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if resistance == 0: return {"resistance": sqrt(pow(lowercase , 2 ) - pow(lowercase , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(lowercase , 2 ) - pow(lowercase , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(lowercase , 2 ) + pow(lowercase , 2 ) )} else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
3
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Dict = (DDPMScheduler,) def __UpperCamelCase ( self , **A_ ) -> Dict: """simple docstring""" UpperCamelCase = { 'num_train_timesteps': 1_000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**A_ ) return config def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=A_ ) def __UpperCamelCase ( self ) -> str: """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=A_ , beta_end=A_ ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=A_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=A_ ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual UpperCamelCase = model(A_ , A_ ) # 2. 
predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(A_ ) ) UpperCamelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' ) UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual UpperCamelCase = model(A_ , A_ ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(A_ ) ) UpperCamelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=A_ ) UpperCamelCase = scheduler.timesteps for i, timestep in enumerate(A_ ): if i == len(A_ ) - 1: UpperCamelCase = -1 else: UpperCamelCase = timesteps[i + 1] UpperCamelCase = scheduler.previous_timestep(A_ ) UpperCamelCase = prev_t.item() self.assertEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(A_ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 1, 0] UpperCamelCase = len(A_ ) with self.assertRaises(A_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ ) def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( A_ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=A_ )
3
1
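The impedance helper in the row above solves the right-triangle relation impedance**2 == resistance**2 + reactance**2 for whichever argument is zero. A quick numeric check on a 3-4-5 triple:

from math import isclose, sqrt

resistance, reactance = 3.0, 4.0
impedance = sqrt(resistance**2 + reactance**2)
assert isclose(impedance, 5.0)
# Recovering a missing side from the other two:
assert isclose(sqrt(impedance**2 - reactance**2), resistance)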
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCAmelCase : Union[str, Any] = { "configuration_blip_2": [ "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Blip2Config", "Blip2QFormerConfig", "Blip2VisionConfig", ], "processing_blip_2": ["Blip2Processor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : List[Any] = [ "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Blip2Model", "Blip2QFormerModel", "Blip2PreTrainedModel", "Blip2ForConditionalGeneration", "Blip2VisionModel", ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys _UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
3
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: _UpperCAmelCase : List[str] = None _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _UpperCAmelCase : List[str] = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } _UpperCAmelCase : Optional[int] = { "camembert-base": 512, } _UpperCAmelCase : Union[str, Any] = "▁" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : str = VOCAB_FILES_NAMES __lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : List[str] = ["input_ids", "attention_mask"] __lowercase : Tuple = CamembertTokenizer def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , ) UpperCamelCase = vocab_file UpperCamelCase = False if not self.vocab_file else True def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
3
1
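Both rows above rely on transformers' _LazyModule to defer heavy imports until first attribute access. A minimal sketch of that idea using PEP 562's module-level __getattr__, with the stdlib json module standing in for heavy submodules; the real implementation also handles TYPE_CHECKING re-exports and optional backends.

import importlib

_IMPORT_STRUCTURE = {"json": ["dumps", "loads"]}  # stand-in for heavy submodules
_ATTR_TO_MODULE = {
    attr: module for module, attrs in _IMPORT_STRUCTURE.items() for attr in attrs
}

def __getattr__(name):
    # Runs only when `name` is not found normally; cache it for next time.
    if name in _ATTR_TO_MODULE:
        value = getattr(importlib.import_module(_ATTR_TO_MODULE[name]), name)
        globals()[name] = value
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")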
from __future__ import annotations import bisect def A ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int: '''simple docstring''' if hi < 0: UpperCamelCase = len(lowercase ) while lo < hi: UpperCamelCase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: UpperCamelCase = mid + 1 else: UpperCamelCase = mid return lo def A ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int: '''simple docstring''' if hi < 0: UpperCamelCase = len(lowercase ) while lo < hi: UpperCamelCase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: UpperCamelCase = mid + 1 else: UpperCamelCase = mid return lo def A ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase ) def A ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase ) def A ( lowercase , lowercase ) -> int | None: '''simple docstring''' UpperCamelCase = 0 UpperCamelCase = len(lowercase ) - 1 while left <= right: UpperCamelCase = left + (right - left) // 2 UpperCamelCase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: UpperCamelCase = midpoint - 1 else: UpperCamelCase = midpoint + 1 return None def A ( lowercase , lowercase ) -> int | None: '''simple docstring''' UpperCamelCase = bisect.bisect_left(lowercase , lowercase ) if index != len(lowercase ) and sorted_collection[index] == item: return index return None def A ( lowercase , lowercase , lowercase , lowercase ) -> int | None: '''simple docstring''' if right < left: return None UpperCamelCase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 ) else: return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase ) if __name__ == "__main__": _UpperCAmelCase : Tuple = input("Enter numbers separated by comma:\n").strip() _UpperCAmelCase : List[Any] = sorted(int(item) for item in user_input.split(",")) _UpperCAmelCase : Tuple = int(input("Enter a single number to be found in the list:\n")) _UpperCAmelCase : Dict = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
3
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCAmelCase : Union[str, Any] = { "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Dict = [ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys _UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
3
1
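The bisect_left function in the row above is a lower-bound binary search. A small randomized property check (hypothetical name lower_bound) against the stdlib's bisect.bisect_left:

import bisect
import random

def lower_bound(sorted_items, item):
    lo, hi = 0, len(sorted_items)
    while lo < hi:
        mid = lo + (hi - lo) // 2  # avoids overflow in lower-level languages
        if sorted_items[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo

random.seed(0)
for _ in range(1_000):
    data = sorted(random.choices(range(20), k=random.randint(0, 15)))
    target = random.randrange(20)
    assert lower_bound(data, target) == bisect.bisect_left(data, target)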
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase = args.log_outputs UpperCamelCase = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric UpperCamelCase = load_metric('wer' ) UpperCamelCase = load_metric('cer' ) # compute metrics UpperCamelCase = wer.compute(references=result['target'] , predictions=result['prediction'] ) UpperCamelCase = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results UpperCamelCase = f'''WER: {wer_result}\nCER: {cer_result}''' print(lowercase ) with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f: f.write(lowercase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCamelCase = f'''log_{dataset_id}_predictions.txt''' UpperCamelCase = f'''log_{dataset_id}_targets.txt''' with open(lowercase , 'w' ) as p, open(lowercase , 'w' ) as t: # mapping function to write output def write_to_file(lowercase , lowercase ): p.write(f'''{i}''' + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f'''{i}''' + '\n' ) t.write(batch['target'] + '\n' ) result.map(lowercase , with_indices=lowercase ) def A ( lowercase ) -> str: '''simple docstring''' UpperCamelCase = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCamelCase = re.sub(lowercase , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! UpperCamelCase = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: UpperCamelCase = ' '.join(text.split(lowercase ) ) return text def A ( lowercase ) -> Dict: '''simple docstring''' UpperCamelCase = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCamelCase = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCamelCase = feature_extractor.sampling_rate # resample audio UpperCamelCase = dataset.cast_column('audio' , Audio(sampling_rate=lowercase ) ) # load eval pipeline if args.device is None: UpperCamelCase = 0 if torch.cuda.is_available() else -1 UpperCamelCase = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(lowercase ): UpperCamelCase = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCamelCase = prediction['text'] UpperCamelCase = normalize_text(batch['sentence'] ) return batch # run inference on all examples UpperCamelCase = dataset.map(lowercase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(lowercase , lowercase ) if __name__ == "__main__": _UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. 
*E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) _UpperCAmelCase : Any = parser.parse_args() main(args)
3
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] = { "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json", } class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Dict = "data2vec-text" def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any: """simple docstring""" super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = hidden_act UpperCamelCase = intermediate_size UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = position_embedding_type UpperCamelCase = use_cache UpperCamelCase = classifier_dropout class lowercase ( _SCREAMING_SNAKE_CASE ): @property def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: UpperCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
3
1
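The evaluation script in the row above delegates WER/CER to datasets metrics. For reference, word error rate is just edit distance over whitespace tokens, normalized by reference length; a minimal sketch with a hypothetical name (word_error_rate):

def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edits needed to turn ref[:i] into hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,         # deletion
                dp[i][j - 1] + 1,         # insertion
                dp[i - 1][j - 1] + cost,  # substitution
            )
    return dp[-1][-1] / len(ref)

assert word_error_rate("the cat sat", "the cat sat") == 0.0
assert word_error_rate("the cat sat", "the bat sat") == 1 / 3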
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : Optional[int] = BarthezTokenizer __lowercase : Dict = BarthezTokenizerFast __lowercase : Optional[Any] = True __lowercase : Dict = True def __UpperCamelCase ( self ) -> str: """simple docstring""" super().setUp() UpperCamelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=A_ ) UpperCamelCase = tokenizer def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = '<pad>' UpperCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(A_ ) , 101_122 ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 101_122 ) @require_torch def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] UpperCamelCase = [0, 57, 3_018, 70_307, 91, 2] UpperCamelCase = self.tokenizer( A_ , max_length=len(A_ ) , padding=A_ , truncation=A_ , return_tensors='pt' ) self.assertIsInstance(A_ , A_ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) UpperCamelCase = batch.input_ids.tolist()[0] self.assertListEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" if not self.test_rust_tokenizer: return UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_rust_tokenizer() UpperCamelCase = 'I was born in 92000, and this is falsé.' 
UpperCamelCase = tokenizer.tokenize(A_ ) UpperCamelCase = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) UpperCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ ) UpperCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) UpperCamelCase = self.get_rust_tokenizer() UpperCamelCase = tokenizer.encode(A_ ) UpperCamelCase = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) @slow def __UpperCamelCase ( self ) -> Dict: """simple docstring""" # fmt: off UpperCamelCase = {'input_ids': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. UpperCamelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=A_ , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=A_ , )
3
from random import shuffle import tensorflow as tf from numpy import array def A ( lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = int(lowercase ) assert noofclusters < len(lowercase ) # Find out the dimensionality UpperCamelCase = len(vectors[0] ) # Will help select random centroids from among the available vectors UpperCamelCase = list(range(len(lowercase ) ) ) shuffle(lowercase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. UpperCamelCase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION UpperCamelCase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points UpperCamelCase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase ) ] ##These nodes will assign the centroid Variables the appropriate ##values UpperCamelCase = tf.placeholder('float64' , [dim] ) UpperCamelCase = [] for centroid in centroids: cent_assigns.append(tf.assign(lowercase , lowercase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) UpperCamelCase = [tf.Variable(0 ) for i in range(len(lowercase ) )] ##These nodes will assign an assignment Variable the appropriate ##value UpperCamelCase = tf.placeholder('int32' ) UpperCamelCase = [] for assignment in assignments: cluster_assigns.append(tf.assign(lowercase , lowercase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input UpperCamelCase = tf.placeholder('float' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors UpperCamelCase = tf.reduce_mean(lowercase , 0 ) ##Node for computing Euclidean distances # Placeholders for input UpperCamelCase = tf.placeholder('float' , [dim] ) UpperCamelCase = tf.placeholder('float' , [dim] ) UpperCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase , lowercase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input UpperCamelCase = tf.placeholder('float' , [noofclusters] ) UpperCamelCase = tf.argmin(lowercase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. UpperCamelCase = tf.initialize_all_variables() # Initialize all variables sess.run(lowercase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. UpperCamelCase = 100 for _ in range(lowercase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(lowercase ) ): UpperCamelCase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. 
UpperCamelCase = [ sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input UpperCamelCase = sess.run( lowercase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(lowercase ): # Collect all the vectors assigned to this cluster UpperCamelCase = [ vectors[i] for i in range(len(lowercase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location UpperCamelCase = sess.run( lowercase , feed_dict={mean_input: array(lowercase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments UpperCamelCase = sess.run(lowercase ) UpperCamelCase = sess.run(lowercase ) return centroids, assignments
3
1
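The TF1 code in the row above runs k-means as explicit session ops. The same expectation-maximization loop as a compact NumPy sketch, keeping the original's fixed iteration count with no stopping criterion; function and variable names here are illustrative:

import numpy as np

def kmeans(vectors: np.ndarray, k: int, iterations: int = 100, seed: int = 0):
    rng = np.random.default_rng(seed)
    # Initialize centroids from randomly chosen data points.
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)]
    for _ in range(iterations):
        # E-step: assign each vector to its nearest centroid.
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=2)
        assignments = distances.argmin(axis=1)
        # M-step: move each centroid to the mean of its members.
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments

points = np.concatenate([np.zeros((5, 2)), np.ones((5, 2)) * 10])
centroids, labels = kmeans(points, k=2)
assert len(set(labels[:5])) == 1 and len(set(labels[5:])) == 1
assert labels[0] != labels[5]  # the two blobs end up in different clusters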
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) _UpperCAmelCase : Optional[Any] = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def A ( lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = {} state_dict.pop('pixel_mean' , lowercase ) state_dict.pop('pixel_std' , lowercase ) UpperCamelCase = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: UpperCamelCase = key.replace(lowercase , lowercase ) if re.match(lowercase , lowercase ): UpperCamelCase = int(re.match(lowercase , lowercase ).group(2 ) ) if layer_nb == 0: UpperCamelCase = key.replace('layers.0' , 'proj_in' ) elif layer_nb == 1: UpperCamelCase = key.replace('layers.1' , 'layers.0' ) elif layer_nb == 2: UpperCamelCase = key.replace('layers.2' , 'proj_out' ) UpperCamelCase = value UpperCamelCase = model_state_dict[ 'prompt_encoder.shared_embedding.positional_embedding' ] return model_state_dict def A ( lowercase , lowercase , lowercase , lowercase="ybelkada/segment-anything" ) -> List[Any]: '''simple docstring''' UpperCamelCase = hf_hub_download(lowercase , f'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: UpperCamelCase = SamConfig() elif "sam_vit_l" in model_name: UpperCamelCase = SamVisionConfig( hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) UpperCamelCase = SamConfig( vision_config=lowercase , ) elif "sam_vit_h" in model_name: UpperCamelCase = SamVisionConfig( hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) UpperCamelCase = SamConfig( vision_config=lowercase , ) UpperCamelCase = torch.load(lowercase , map_location='cpu' ) UpperCamelCase = replace_keys(lowercase ) UpperCamelCase = SamImageProcessor() UpperCamelCase = SamProcessor(image_processor=lowercase ) UpperCamelCase = SamModel(lowercase ) hf_model.load_state_dict(lowercase ) UpperCamelCase = hf_model.to('cuda' ) UpperCamelCase = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png' UpperCamelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert('RGB' ) UpperCamelCase = [[[400, 650]]] UpperCamelCase = [[1]] UpperCamelCase = processor(images=np.array(lowercase ) , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): UpperCamelCase = 
hf_model(**lowercase ) UpperCamelCase = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8 UpperCamelCase = processor( images=np.array(lowercase ) , input_points=lowercase , input_labels=lowercase , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): UpperCamelCase = hf_model(**lowercase ) UpperCamelCase = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4 UpperCamelCase = ((75, 275, 1_725, 850),) UpperCamelCase = processor(images=np.array(lowercase ) , input_boxes=lowercase , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): UpperCamelCase = hf_model(**lowercase ) UpperCamelCase = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4 # Test with 2 points and 1 image. UpperCamelCase = [[[400, 650], [800, 650]]] UpperCamelCase = [[1, 1]] UpperCamelCase = processor( images=np.array(lowercase ) , input_points=lowercase , input_labels=lowercase , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): UpperCamelCase = hf_model(**lowercase ) UpperCamelCase = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2 if __name__ == "__main__": _UpperCAmelCase : Optional[Any] = argparse.ArgumentParser() _UpperCAmelCase : Dict = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) _UpperCAmelCase : int = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
3
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _UpperCAmelCase : Tuple = _symbol_database.Default() _UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile( b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) _UpperCAmelCase : int = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _UpperCAmelCase : int = None _UpperCAmelCase : List[str] = b"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _UpperCAmelCase : Optional[Any] = 45 _UpperCAmelCase : Any = 1_581 _UpperCAmelCase : Tuple = 1_517 _UpperCAmelCase : List[str] = 1_570 _UpperCAmelCase : int = 1_584 _UpperCAmelCase : List[Any] = 1_793 _UpperCAmelCase : Optional[int] = 1_795 _UpperCAmelCase : Any = 1_916 _UpperCAmelCase : Tuple = 1_864 _UpperCAmelCase : List[Any] = 1_905 _UpperCAmelCase : Union[str, Any] = 1_919 _UpperCAmelCase : str = 2_429 _UpperCAmelCase : Any = 2_208 _UpperCAmelCase : Dict = 2_418 _UpperCAmelCase : Optional[Any] = 2_323 _UpperCAmelCase : Tuple = 2_407 # @@protoc_insertion_point(module_scope)
3
1
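# An illustrative sketch (not a dataset row) of how the generated
# `sentencepiece_model_pb2` module in the row above is typically consumed:
# parse a serialized SentencePiece model and inspect fields the schema
# defines. The module import name and the "spiece.model" path are
# assumptions made for the example.
import sentencepiece_model_pb2 as model_pb2

m = model_pb2.ModelProto()
with open("spiece.model", "rb") as f:  # hypothetical model file
    m.ParseFromString(f.read())
# These fields come straight from the serialized schema (TrainerSpec, pieces).
print(m.trainer_spec.model_type, m.trainer_spec.vocab_size)
print(len(m.pieces), m.pieces[0].piece if m.pieces else None)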
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : str = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } _UpperCAmelCase : Any = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' for attribute in key.split('.' ): UpperCamelCase = getattr(lowercase , lowercase ) if weight_type is not None: UpperCamelCase = getattr(lowercase , lowercase ).shape else: UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCamelCase = value elif weight_type == "weight_g": UpperCamelCase = value elif weight_type == "weight_v": UpperCamelCase = value elif weight_type == "bias": UpperCamelCase = value else: UpperCamelCase = value logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def A ( lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase = [] UpperCamelCase = fairseq_model.state_dict() UpperCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue UpperCamelCase = True if "*" in mapped_key: UpperCamelCase = name.split(lowercase )[0].split('.' 
)[-2] UpperCamelCase = mapped_key.replace('*' , lowercase ) if "weight_g" in name: UpperCamelCase = 'weight_g' elif "weight_v" in name: UpperCamelCase = 'weight_v' elif "bias" in name: UpperCamelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase = 'weight' else: UpperCamelCase = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = full_name.split('conv_layers.' )[-1] UpperCamelCase = name.split('.' ) UpperCamelCase = int(items[0] ) UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase ) @torch.no_grad() def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> List[Any]: '''simple docstring''' if config_path is not None: UpperCamelCase = UniSpeechSatConfig.from_pretrained(lowercase ) else: UpperCamelCase = UniSpeechSatConfig() UpperCamelCase = '' if is_finetuned: UpperCamelCase = UniSpeechSatForCTC(lowercase ) else: UpperCamelCase = UniSpeechSatForPreTraining(lowercase ) UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) UpperCamelCase = model[0].eval() recursively_load_weights(lowercase , lowercase ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned 
model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCAmelCase : Dict = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
3
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" # A mock response for an HTTP head request to emulate server down UpperCamelCase = mock.Mock() UpperCamelCase = 500 UpperCamelCase = {} UpperCamelCase = HTTPError UpperCamelCase = {} # Download this model to make sure it's in the cache. UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head: UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def __UpperCamelCase ( self ) -> Dict: """simple docstring""" # A mock response for an HTTP head request to emulate server down UpperCamelCase = mock.Mock() UpperCamelCase = 500 UpperCamelCase = {} UpperCamelCase = HTTPError UpperCamelCase = {} # Download this model to make sure it's in the cache. UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head: UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' ) # This check we did call the fake head request mock_head.assert_called() def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" # This test is for deprecated behavior and can be removed in v5 try: UpperCamelCase = tempfile.mktemp() with open(A_ , 'wb' ) as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ ) UpperCamelCase = AlbertTokenizer.from_pretrained(A_ ) finally: os.remove(A_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('tokenizer.json' , 'wb' ) as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ ) UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('tokenizer.json' ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" # This test is for deprecated behavior and can be removed in v5 UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ) @is_staging_test class lowercase ( unittest.TestCase ): __lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def __UpperCamelCase ( cls ) -> Tuple: """simple docstring""" UpperCamelCase = TOKEN HfFolder.save_token(A_ ) @classmethod def __UpperCamelCase ( cls ) -> Optional[int]: """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-tokenizer' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' ) except HTTPError: pass def __UpperCamelCase ( self ) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizer(A_ ) tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='test-tokenizer' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def __UpperCamelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizer(A_ ) tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token ) UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def __UpperCamelCase ( self ) -> Dict: """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = CustomTokenizer(A_ ) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module 
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = os.path.join(A_ , 'vocab.txt' ) with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) UpperCamelCase = BertTokenizerFast.from_pretrained(A_ ) bert_tokenizer.save_pretrained(A_ ) UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ ) tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' ) UpperCamelCase = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = Trie() trie.add('Hello 友達' ) self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} ) trie.add('Hello' ) trie.data self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] ) trie.add('[CLS]' ) trie.add('extra_id_1' ) trie.add('extra_id_100' ) self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = Trie() trie.add('A' ) self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] ) self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = Trie() trie.add('TOKEN]' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = Trie() trie.add('A' ) trie.add('P' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = Trie() trie.add('AB' ) trie.add('B' ) trie.add('C' ) self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = Trie() trie.add('ABC' ) trie.add('B' ) trie.add('CD' ) self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] ) def __UpperCamelCase ( self ) -> int: """simple docstring""" # Even if the offsets are wrong, we necessarily output correct string # parts. UpperCamelCase = Trie() UpperCamelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] ) self.assertEqual(A_ , ['AB', 'C'] )
3
1
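# An illustrative sketch of the wildcard-rename pattern that
# `recursively_load_weights` in the row above relies on: a "*" in the mapped
# key is replaced by the layer index recovered from the fairseq parameter
# name. MAPPING here is a one-entry subset of the real table.
MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def rename(name):
    for key, mapped_key in MAPPING.items():
        if key in name:
            # "encoder.layers.3.self_attn.k_proj.weight" -> prefix "encoder.layers.3."
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped_key.replace("*", layer_index)
    return None

assert rename("encoder.layers.3.self_attn.k_proj.weight") == "encoder.layers.3.attention.k_proj"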
_UpperCAmelCase : int = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
3
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' assert isinstance(lowercase , lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = features.copy() if features else default_expected_features UpperCamelCase = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def A ( lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if issubclass(lowercase , lowercase ): UpperCamelCase = parquet_path elif issubclass(lowercase , lowercase ): UpperCamelCase = [parquet_path] UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_dataset(lowercase , lowercase ) def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple: '''simple docstring''' assert isinstance(lowercase , lowercase ) for split in splits: UpperCamelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( lowercase , 
lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase = ParquetDatasetReader( {'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = features.copy() if features else default_expected_features UpperCamelCase = ( Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if split: UpperCamelCase = {split: parquet_path} else: UpperCamelCase = 'train' UpperCamelCase = {'train': parquet_path, 'test': parquet_path} UpperCamelCase = tmp_path / 'cache' UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read() _check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def A ( lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' ) assert writer.write() > 0 UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' ) UpperCamelCase = pf.read() assert dataset.data.table == output_table def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' ) UpperCamelCase = {'image': [image_path]} UpperCamelCase = Features({'image': Image()} ) UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase ) UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' ) assert writer.write() > 0 UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) ) assert dataset.features == reloaded_dataset.features UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( 'feature, expected' , [ (Features({'foo': Value('int32' )} ), None), (Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def A ( lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' assert get_writer_batch_size(lowercase ) == expected
3
1
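# A hedged sketch of the Parquet round-trip that the tests above exercise,
# written against the public `datasets` API (assumes `datasets` and `pyarrow`
# are installed; `Dataset.to_parquet` delegates to the same ParquetDatasetWriter
# the tests target).
from datasets import Dataset

ds = Dataset.from_dict(
    {"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]}
)
ds.to_parquet("tmp.parquet")
reloaded = Dataset.from_parquet("tmp.parquet")
assert reloaded.column_names == ["col_1", "col_2", "col_3"]
assert reloaded.num_rows == 4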
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : int = logging.get_logger(__name__) _UpperCAmelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} _UpperCAmelCase : Dict = { "vocab_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json" ), }, "merges_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt" ), }, } _UpperCAmelCase : Tuple = { "allenai/longformer-base-4096": 4_096, "allenai/longformer-large-4096": 4_096, "allenai/longformer-large-4096-finetuned-triviaqa": 4_096, "allenai/longformer-base-4096-extra.pos.embd.only": 4_096, "allenai/longformer-large-4096-extra.pos.embd.only": 4_096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def A ( ) -> List[Any]: '''simple docstring''' UpperCamelCase = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) UpperCamelCase = bs[:] UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase ) cs.append(2**8 + n ) n += 1 UpperCamelCase = [chr(lowercase ) for n in cs] return dict(zip(lowercase , lowercase ) ) def A ( lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char return pairs class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : List[str] = VOCAB_FILES_NAMES __lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self , A_ , A_ , A_="replace" , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=False , **A_ , ) -> int: """simple docstring""" UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , ) with open(A_ , encoding='utf-8' ) as vocab_handle: UpperCamelCase = json.load(A_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = errors # how to handle errors in decoding UpperCamelCase = bytes_to_unicode() UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: UpperCamelCase = merges_handle.read().split('\n' )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) ) UpperCamelCase = {} UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def __UpperCamelCase ( self ) -> Dict: """simple docstring""" return len(self.encoder ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __UpperCamelCase ( self , A_ ) -> List[Any]: """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = tuple(A_ ) UpperCamelCase = get_pairs(A_ ) if not pairs: return token while True: UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(A_ ): try: UpperCamelCase = word.index(A_ , A_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCamelCase = j if word[i] == first and i < len(A_ ) - 1 and 
word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(A_ ) UpperCamelCase = new_word if len(A_ ) == 1: break else: UpperCamelCase = get_pairs(A_ ) UpperCamelCase = ' '.join(A_ ) UpperCamelCase = word return word def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" UpperCamelCase = [] for token in re.findall(self.pat , A_ ): UpperCamelCase = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) ) return bpe_tokens def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" return self.decoder.get(A_ ) def __UpperCamelCase ( self , A_ ) -> Dict: """simple docstring""" UpperCamelCase = ''.join(A_ ) UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' ) UpperCamelCase = 0 with open(A_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) UpperCamelCase = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase ( self , A_ , A_=False , **A_ ) -> Any: """simple docstring""" UpperCamelCase = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()): UpperCamelCase = ' ' + text return (text, kwargs)
3
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Tuple: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize UpperCamelCase = size if size is not None else {'height': 18, 'width': 20} UpperCamelCase = do_thumbnail UpperCamelCase = do_align_axis UpperCamelCase = do_pad UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : Optional[int] = DonutImageProcessor if is_vision_available() else None def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = DonutImageProcessingTester(self ) @property def __UpperCamelCase ( self ) -> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , 'do_resize' ) ) self.assertTrue(hasattr(A_ , 'size' ) ) self.assertTrue(hasattr(A_ , 'do_thumbnail' ) ) self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) ) self.assertTrue(hasattr(A_ , 'do_pad' ) ) self.assertTrue(hasattr(A_ , 'do_normalize' ) ) self.assertTrue(hasattr(A_ , 'image_mean' ) ) self.assertTrue(hasattr(A_ , 'image_std' ) ) def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def __UpperCamelCase ( self ) -> int: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 
1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def __UpperCamelCase ( self ) -> Any: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
3
1
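# A minimal standalone sketch of the `get_pairs` helper defined in the
# tokenizer above: collect the set of adjacent symbol pairs that BPE merge
# ranking scans on each iteration.
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_pairs(("l", "o", "w", "er")) == {("l", "o"), ("o", "w"), ("w", "er")}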
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def A ( lowercase , lowercase=None ) -> Tuple: '''simple docstring''' UpperCamelCase = None if token is not None: UpperCamelCase = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''} UpperCamelCase = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' UpperCamelCase = requests.get(lowercase , headers=lowercase ).json() UpperCamelCase = {} try: job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) UpperCamelCase = math.ceil((result['total_count'] - 100) / 100 ) for i in range(lowercase ): UpperCamelCase = requests.get(url + f'''&page={i + 2}''' , headers=lowercase ).json() job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) return job_links except Exception: print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def A ( lowercase , lowercase=None ) -> List[Any]: '''simple docstring''' UpperCamelCase = None if token is not None: UpperCamelCase = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''} UpperCamelCase = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100''' UpperCamelCase = requests.get(lowercase , headers=lowercase ).json() UpperCamelCase = {} try: artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) UpperCamelCase = math.ceil((result['total_count'] - 100) / 100 ) for i in range(lowercase ): UpperCamelCase = requests.get(url + f'''&page={i + 2}''' , headers=lowercase ).json() artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) return artifacts except Exception: print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def A ( lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = None if token is not None: UpperCamelCase = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''} UpperCamelCase = requests.get(lowercase , headers=lowercase , allow_redirects=lowercase ) UpperCamelCase = result.headers['Location'] UpperCamelCase = requests.get(lowercase , allow_redirects=lowercase ) UpperCamelCase = os.path.join(lowercase , f'''{artifact_name}.zip''' ) with open(lowercase , 'wb' ) as fp: fp.write(response.content ) def A ( lowercase , lowercase=None ) -> Any: '''simple docstring''' UpperCamelCase = [] UpperCamelCase = [] UpperCamelCase = None with zipfile.ZipFile(lowercase ) as z: for filename in z.namelist(): if not os.path.isdir(lowercase ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(lowercase ) as f: for line in f: UpperCamelCase = line.decode('UTF-8' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs UpperCamelCase = line[: line.index(': ' )] UpperCamelCase = line[line.index(': ' ) + len(': ' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('FAILED ' ): # `test` is the test method that failed UpperCamelCase = line[len('FAILED ' ) :] failed_tests.append(lowercase ) elif filename == "job_name.txt": UpperCamelCase = line if len(lowercase ) != len(lowercase ): raise ValueError( 
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(lowercase )} for `errors` ''' f'''and {len(lowercase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some''' ' problem.' ) UpperCamelCase = None if job_name and job_links: UpperCamelCase = job_links.get(lowercase , lowercase ) # A list with elements of the form (line of error, error, failed test) UpperCamelCase = [x + [y] + [job_link] for x, y in zip(lowercase , lowercase )] return result def A ( lowercase , lowercase=None ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase = [] UpperCamelCase = [os.path.join(lowercase , lowercase ) for p in os.listdir(lowercase ) if p.endswith('.zip' )] for p in paths: errors.extend(get_errors_from_single_artifact(lowercase , job_links=lowercase ) ) return errors def A ( lowercase , lowercase=None ) -> Any: '''simple docstring''' UpperCamelCase = Counter() counter.update([x[1] for x in logs] ) UpperCamelCase = counter.most_common() UpperCamelCase = {} for error, count in counts: if error_filter is None or error not in error_filter: UpperCamelCase = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]} UpperCamelCase = dict(sorted(r.items() , key=lambda lowercase : item[1]["count"] , reverse=lowercase ) ) return r def A ( lowercase ) -> List[str]: '''simple docstring''' UpperCamelCase = test.split('::' )[0] if test.startswith('tests/models/' ): UpperCamelCase = test.split('/' )[2] else: UpperCamelCase = None return test def A ( lowercase , lowercase=None ) -> List[Any]: '''simple docstring''' UpperCamelCase = [(x[0], x[1], get_model(x[2] )) for x in logs] UpperCamelCase = [x for x in logs if x[2] is not None] UpperCamelCase = {x[2] for x in logs} UpperCamelCase = {} for test in tests: UpperCamelCase = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) UpperCamelCase = counter.most_common() UpperCamelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} UpperCamelCase = sum(error_counts.values() ) if n_errors > 0: UpperCamelCase = {'count': n_errors, 'errors': error_counts} UpperCamelCase = dict(sorted(r.items() , key=lambda lowercase : item[1]["count"] , reverse=lowercase ) ) return r def A ( lowercase ) -> Any: '''simple docstring''' UpperCamelCase = '| no. | error | status |' UpperCamelCase = '|-:|:-|:-|' UpperCamelCase = [header, sep] for error in reduced_by_error: UpperCamelCase = reduced_by_error[error]['count'] UpperCamelCase = f'''| {count} | {error[:100]} | |''' lines.append(lowercase ) return "\n".join(lowercase ) def A ( lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = '| model | no. 
of errors | major error | count |' UpperCamelCase = '|-:|-:|-:|-:|' UpperCamelCase = [header, sep] for model in reduced_by_model: UpperCamelCase = reduced_by_model[model]['count'] UpperCamelCase , UpperCamelCase = list(reduced_by_model[model]['errors'].items() )[0] UpperCamelCase = f'''| {model} | {count} | {error[:60]} | {_count} |''' lines.append(lowercase ) return "\n".join(lowercase ) if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") _UpperCAmelCase : Union[str, Any] = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) _UpperCAmelCase : str = get_job_links(args.workflow_run_id, token=args.token) _UpperCAmelCase : List[str] = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: _UpperCAmelCase : Optional[Any] = k.find(" / ") _UpperCAmelCase : Union[str, Any] = k[index + len(" / ") :] _UpperCAmelCase : int = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) _UpperCAmelCase : int = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) _UpperCAmelCase : str = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error _UpperCAmelCase : str = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors _UpperCAmelCase : Dict = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) _UpperCAmelCase : str = reduce_by_error(errors) _UpperCAmelCase : Union[str, Any] = reduce_by_model(errors) _UpperCAmelCase : Tuple = make_github_table(reduced_by_error) _UpperCAmelCase : int = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
3
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Dict = logging.get_logger(__name__) _UpperCAmelCase : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } _UpperCAmelCase : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } _UpperCAmelCase : List[str] = {"facebook/blenderbot_small-90M": 512} def A ( lowercase ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char UpperCamelCase = set(lowercase ) return pairs class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Optional[Any] = VOCAB_FILES_NAMES __lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Any = ["input_ids", "attention_mask"] def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , ) -> List[Any]: """simple docstring""" super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ ) with open(A_ , encoding='utf-8' ) as vocab_handle: UpperCamelCase = json.load(A_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: UpperCamelCase = merges_handle.read().split('\n' )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in merges] UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) ) UpperCamelCase = {} @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return len(self.encoder ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = re.sub('([.,!?()])' , r' \1' , A_ ) UpperCamelCase = re.sub('(\')' , r' \1 ' , A_ ) UpperCamelCase = re.sub(r'\s{2,}' , ' ' , A_ ) if "\n" in token: UpperCamelCase = token.replace('\n' , ' __newln__' ) UpperCamelCase = token.split(' ' ) UpperCamelCase = [] for token in tokens: if not len(A_ ): continue UpperCamelCase = token.lower() UpperCamelCase = tuple(A_ ) UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) UpperCamelCase = get_pairs(A_ ) if not pairs: words.append(A_ ) continue while True: UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(A_ ): try: UpperCamelCase = word.index(A_ , A_ ) new_word.extend(word[i:j] ) UpperCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(A_ ) UpperCamelCase = new_word if len(A_ ) == 1: break else: UpperCamelCase = get_pairs(A_ ) UpperCamelCase = '@@ '.join(A_ ) UpperCamelCase = 
word[:-4] UpperCamelCase = word words.append(A_ ) return " ".join(A_ ) def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = re.findall(r'\S+\n?' , A_ ) for token in words: split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) ) return split_tokens def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" UpperCamelCase = token.lower() return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" return self.decoder.get(A_ , self.unk_token ) def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" UpperCamelCase = ' '.join(A_ ).replace('@@ ' , '' ).strip() return out_string def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' ) UpperCamelCase = 0 with open(A_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) UpperCamelCase = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file
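# --- Illustrative sketch (toy ranks): the core of the BPE loop above picks
# the adjacent symbol pair with the lowest merge rank and fuses it. The
# ranks and the input word below are hypothetical.
def demo_get_pairs(word):
    # All adjacent symbol pairs in `word` (a tuple of symbols).
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

demo_ranks = {('l', 'o'): 0, ('lo', 'w'): 1}  # lower rank = merged earlier
demo_word = ('l', 'o', 'w', '</w>')
best = min(demo_get_pairs(demo_word), key=lambda p: demo_ranks.get(p, float('inf')))
assert best == ('l', 'o')  # rank 0 wins, so 'l' + 'o' fuse into 'lo' first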
3
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = tempfile.mkdtemp() UpperCamelCase = BlipImageProcessor() UpperCamelCase = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) UpperCamelCase = BlipaProcessor(A_ , A_ ) processor.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self , **A_ ) -> int: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).tokenizer def __UpperCamelCase ( self , **A_ ) -> Optional[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCamelCase = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) UpperCamelCase = self.get_image_processor(do_normalize=A_ , padding_value=1.0 ) UpperCamelCase = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , A_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipaProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = image_processor(A_ , return_tensors='np' ) UpperCamelCase = processor(images=A_ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipaProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase = 'lower newer' UpperCamelCase = processor(text=A_ ) UpperCamelCase = tokenizer(A_ , return_token_type_ids=A_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipaProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase = 'lower newer' UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=A_ , images=A_ ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 
'input_ids', 'attention_mask'] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipaProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase = processor.batch_decode(A_ ) UpperCamelCase = tokenizer.batch_decode(A_ ) self.assertListEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipaProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase = 'lower newer' UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=A_ , images=A_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
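# --- Illustrative sketch of the merge contract these tests pin down: a
# multimodal processor returns the union of its tokenizer's and image
# processor's outputs. The toy dicts below are hypothetical stand-ins.
tokenizer_out = {'input_ids': [[101, 102]], 'attention_mask': [[1, 1]]}
image_out = {'pixel_values': [[[0.0]]]}
merged = {**image_out, **tokenizer_out}
assert sorted(merged) == ['attention_mask', 'input_ids', 'pixel_values']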
3
def A ( lowercase ) -> str:
    '''simple docstring'''
    UpperCamelCase = int(lowercase )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(lowercase )
    UpperCamelCase , UpperCamelCase = divmod(lowercase , 2 )
    return binary_recursive(lowercase ) + str(lowercase )


def A ( lowercase ) -> str:
    '''simple docstring'''
    UpperCamelCase = str(lowercase ).strip()
    if not number:
        raise ValueError('No input value was provided' )
    UpperCamelCase = '-' if number.startswith('-' ) else ''
    UpperCamelCase = number.lstrip('-' )
    if not number.isnumeric():
        raise ValueError('Input value is not an integer' )
    return f'''{negative}0b{binary_recursive(int(lowercase ) )}'''


if __name__ == "__main__":
    from doctest import testmod

    testmod()
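# --- Worked example of the recursion above (assuming its helpers keep their
# original names): 11 -> divmod(11, 2) = (5, 1), divmod(5, 2) = (2, 1),
# divmod(2, 2) = (1, 0); unwinding yields '1011'. Iterative check:
def demo_binary(n: int) -> str:
    out = ''
    while n > 1:
        n, r = divmod(n, 2)
        out = str(r) + out
    return str(n) + out

assert demo_binary(11) == '1011'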
3
1
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): @register_to_config def __init__( self , A_ , A_ = None , A_ = None ) -> Any: """simple docstring""" super().__init__() UpperCamelCase = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCamelCase = torch.zeros(A_ , A_ ) else: UpperCamelCase = None UpperCamelCase = torch.nn.Parameter(A_ ) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : VQModel __lowercase : CLIPTextModel __lowercase : CLIPTokenizer __lowercase : TransformeraDModel __lowercase : LearnedClassifierFreeSamplingEmbeddings __lowercase : VQDiffusionScheduler def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]: """simple docstring""" super().__init__() self.register_modules( vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1 # get prompt text embeddings UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) UpperCamelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length] UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate text embeddings for each generation per prompt UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 ) else: UpperCamelCase = [''] * batch_size UpperCamelCase = text_input_ids.shape[-1] UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , ) UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase = negative_prompt_embeds.shape[1] UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 ) UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" if isinstance(A_ , A_ ): UpperCamelCase = 1 elif isinstance(A_ , A_ ): UpperCamelCase = len(A_ ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' ) UpperCamelCase = batch_size * num_images_per_prompt UpperCamelCase = guidance_scale > 1.0 UpperCamelCase = self._encode_prompt(A_ , A_ , A_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(A_ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCamelCase = (batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCamelCase = self.transformer.num_vector_embeds - 1 UpperCamelCase = torch.full(A_ , A_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( 'Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,' F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCamelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(A_ , device=self.device ) UpperCamelCase = self.scheduler.timesteps.to(self.device ) UpperCamelCase = latents for i, t in enumerate(self.progress_bar(A_ ) ): # expand the sample if we are doing classifier free guidance UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample if do_classifier_free_guidance: UpperCamelCase , UpperCamelCase = model_output.chunk(2 ) UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ ) UpperCamelCase = self.truncate(A_ , A_ ) # remove `log(0)`'s (`-inf`s) UpperCamelCase = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A_ , A_ , A_ ) UpperCamelCase = self.vqvae.config.vq_embed_dim UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ ) UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ ) def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor: """simple docstring""" UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ ) UpperCamelCase = torch.exp(A_ ) UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ ) UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 ) UpperCamelCase = keep_mask[:, :-1, :] UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) ) UpperCamelCase = log_p_x_0.clone() UpperCamelCase = -torch.inf # -inf = log(0) return rv
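# --- Illustrative 1-D sketch of the `truncate` step above: keep the most
# probable codebook entries whose cumulative probability stays below
# `truncation_rate`, always retain the single best one, and send the rest
# to log-prob -inf. (The guidance step earlier in the call is the usual
# `uncond + scale * (text - uncond)` blend.) Probabilities are made up.
import torch

log_p = torch.log(torch.tensor([0.05, 0.60, 0.25, 0.10]))
sorted_lp, idx = torch.sort(log_p, descending=True)
keep_sorted = torch.exp(sorted_lp).cumsum(0) < 0.8
keep_sorted = torch.cat([torch.ones(1, dtype=torch.bool), keep_sorted[:-1]])
keep = keep_sorted[idx.argsort(0)]  # scatter the mask back to original order
truncated = log_p.clone()
truncated[~keep] = float('-inf')
assert keep.tolist() == [False, True, True, False]  # 0.60 and 0.25 survive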
3
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Tuple = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k", "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v", "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q", "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u", "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v", "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out", "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos", "self_attn.rotary_emb": "encoder.embed_positions", "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm", "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1", "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2", "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv", "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm", "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm", "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense", "ffn1.w_2": "encoder.layers.*.ffn1.output_dense", "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm", "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense", "ffn2.w_2": "encoder.layers.*.ffn2.output_dense", "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _UpperCAmelCase : Any = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' for attribute in key.split('.' ): UpperCamelCase = getattr(lowercase , lowercase ) if weight_type is not None: UpperCamelCase = getattr(lowercase , lowercase ).shape else: UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCamelCase = value elif weight_type == "weight_g": UpperCamelCase = value elif weight_type == "weight_v": UpperCamelCase = value elif weight_type == "bias": UpperCamelCase = value elif weight_type == "running_mean": UpperCamelCase = value elif weight_type == "running_var": UpperCamelCase = value elif weight_type == "num_batches_tracked": UpperCamelCase = value elif weight_type == "inv_freq": UpperCamelCase = value else: UpperCamelCase = value logger.info(f'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def A ( lowercase , lowercase , lowercase ) -> Any: '''simple docstring''' UpperCamelCase = [] UpperCamelCase = fairseq_model.state_dict() UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: UpperCamelCase = True if "*" in mapped_key: UpperCamelCase = name.split(lowercase )[0].split('.' )[-2] UpperCamelCase = mapped_key.replace('*' , lowercase ) if "pos_bias_u" in name: UpperCamelCase = None elif "pos_bias_v" in name: UpperCamelCase = None elif "weight_g" in name: UpperCamelCase = 'weight_g' elif "weight_v" in name: UpperCamelCase = 'weight_v' elif "bias" in name: UpperCamelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase = 'weight' elif "running_mean" in name: UpperCamelCase = 'running_mean' elif "inv_freq" in name: UpperCamelCase = 'inv_freq' elif "running_var" in name: UpperCamelCase = 'running_var' elif "num_batches_tracked" in name: UpperCamelCase = 'num_batches_tracked' else: UpperCamelCase = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = full_name.split('conv_layers.' )[-1] UpperCamelCase = name.split('.' 
) UpperCamelCase = int(items[0] ) UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase ) @torch.no_grad() def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int: '''simple docstring''' if config_path is not None: UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' ) else: UpperCamelCase = WavaVecaConformerConfig() if "rope" in checkpoint_path: UpperCamelCase = 'rotary' if is_finetuned: if dict_path: UpperCamelCase = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase = target_dict.pad_index UpperCamelCase = target_dict.bos_index UpperCamelCase = target_dict.eos_index UpperCamelCase = len(target_dict.symbols ) UpperCamelCase = os.path.join(lowercase , 'vocab.json' ) if not os.path.isdir(lowercase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase = 0 UpperCamelCase = 1 with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(lowercase , lowercase ) UpperCamelCase = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , ) UpperCamelCase = True if config.feat_extract_norm == 'layer' else False UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) UpperCamelCase = WavaVecaConformerForCTC(lowercase ) else: UpperCamelCase = 
WavaVecaConformerForPreTraining(lowercase ) if is_finetuned: UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: UpperCamelCase = argparse.Namespace(task='audio_pretraining' ) UpperCamelCase = fairseq.tasks.setup_task(lowercase ) UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase ) UpperCamelCase = model[0].eval() recursively_load_weights(lowercase , lowercase , not is_finetuned ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCAmelCase : Dict = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
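# --- Illustrative sketch (hypothetical module tree): the getattr-chain
# pattern that `set_recursively` above relies on for dotted keys such as
# 'encoder.layers.0.bias'.
class _Node:
    pass

demo_root = _Node()
demo_root.encoder = _Node()
demo_root.encoder.bias = 0.0
pointer = demo_root
for attribute in 'encoder.bias'.split('.')[:-1]:
    pointer = getattr(pointer, attribute)
setattr(pointer, 'bias', 1.23)
assert demo_root.encoder.bias == 1.23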
3
1
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class lowercase : def __init__( self , A_ , A_=13 , A_=10 , A_=3 , A_=2 , A_=2 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_="divided_space_time" , A_=None , ) -> Any: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = patch_size UpperCamelCase = num_frames UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = attention_type UpperCamelCase = initializer_range UpperCamelCase = scope UpperCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = (num_frames) * self.num_patches_per_frame + 1 def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) UpperCamelCase = self.num_labels return config def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Dict: """simple docstring""" UpperCamelCase = TimesformerModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> str: """simple docstring""" UpperCamelCase = TimesformerForVideoClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ ) # verify the logits shape UpperCamelCase = 
torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , A_ ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : Optional[int] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () __lowercase : Dict = ( {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification} if is_torch_available() else {} ) __lowercase : Optional[Any] = False __lowercase : int = False __lowercase : Any = False __lowercase : Any = False def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = TimesformerModelTester(self ) UpperCamelCase = ConfigTester( self , config_class=A_ , has_text_modality=A_ , hidden_size=37 ) def __UpperCamelCase ( self , A_ , A_ , A_=False ) -> Any: """simple docstring""" UpperCamelCase = copy.deepcopy(A_ ) if return_labels: if model_class in get_values(A_ ): UpperCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A_ ) return inputs_dict def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='TimeSformer does not use inputs_embeds' ) def __UpperCamelCase ( self ) -> str: """simple docstring""" pass def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , nn.Linear ) ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(A_ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , A_ ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*A_ ) @slow def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = TimesformerModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if not self.has_attentions: pass else: UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = True for model_class in self.all_model_classes: UpperCamelCase = self.model_tester.seq_length UpperCamelCase = self.model_tester.num_frames UpperCamelCase = True UpperCamelCase = False UpperCamelCase = True UpperCamelCase = model_class(A_ ) 
model.to(A_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) ) UpperCamelCase = outputs.attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCamelCase = True UpperCamelCase = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) ) UpperCamelCase = outputs.attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) UpperCamelCase = len(A_ ) # Check attention is always last and order is fine UpperCamelCase = True UpperCamelCase = True UpperCamelCase = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) ) self.assertEqual(out_len + 1 , len(A_ ) ) UpperCamelCase = outputs.attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" def check_hidden_states_output(A_ , A_ , A_ ): UpperCamelCase = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) ) UpperCamelCase = outputs.hidden_states UpperCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(A_ ) , A_ ) UpperCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(A_ , A_ , A_ ) def A ( ) -> int: '''simple docstring''' UpperCamelCase = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) UpperCamelCase = np.load(lowercase ) return list(lowercase ) @require_torch @require_vision class lowercase ( unittest.TestCase ): @cached_property def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to( A_ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_video() UpperCamelCase = image_processor(video[:8] , return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**A_ ) # verify the logits UpperCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , A_ ) UpperCamelCase = 
torch.tensor([-0.3016, -0.7713, -0.4205] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
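# --- Worked numbers behind `seq_length` in the tester above: with the
# defaults image_size=10, patch_size=2, num_frames=2,
# num_patches_per_frame = (10 // 2) ** 2 = 25 and
# seq_length = 2 * 25 + 1 = 51 (the +1 is the CLS token).
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2
seq_length = num_frames * num_patches_per_frame + 1
assert (num_patches_per_frame, seq_length) == (25, 51)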
3
from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets _UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n" _UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n" _UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def A ( lowercase , lowercase ) -> List[str]: '''simple docstring''' return float((preds == labels).mean() ) def A ( lowercase , lowercase ) -> Tuple: '''simple docstring''' UpperCamelCase = simple_accuracy(lowercase , lowercase ) UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) ) return { "accuracy": acc, "f1": fa, } def A ( lowercase , lowercase ) -> Optional[int]: '''simple docstring''' UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] ) UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' 
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , ) def __UpperCamelCase ( self , A_ , A_ ) -> Any: """simple docstring""" if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(A_ , A_ )} elif self.config_name == "stsb": return pearson_and_spearman(A_ , A_ ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(A_ , A_ ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(A_ , A_ )} else: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
3
1
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _UpperCAmelCase : str = "scheduler_config.json" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Tuple = 1 __lowercase : int = 2 __lowercase : List[Any] = 3 __lowercase : str = 4 __lowercase : Optional[Any] = 5 @dataclass class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : jnp.ndarray class lowercase : __lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME __lowercase : Dict = ["dtype"] __lowercase : List[Any] = [] __lowercase : Dict = True @classmethod def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = cls.load_config( pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , ) UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ ) if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ): UpperCamelCase = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str: """simple docstring""" self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ ) @property def __UpperCamelCase ( self ) -> int: """simple docstring""" return self._get_compatibles() @classmethod def __UpperCamelCase ( cls ) -> int: """simple docstring""" UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) ) UpperCamelCase = importlib.import_module(__name__.split('.' )[0] ) UpperCamelCase = [ getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ ) ] return compatible_classes def A ( lowercase , lowercase ) -> jnp.ndarray: '''simple docstring''' assert len(lowercase ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase ) def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray: '''simple docstring''' def alpha_bar(lowercase ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 UpperCamelCase = [] for i in range(lowercase ): UpperCamelCase = i / num_diffusion_timesteps UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) ) return jnp.array(lowercase , dtype=lowercase ) @flax.struct.dataclass class lowercase : __lowercase : jnp.ndarray __lowercase : jnp.ndarray __lowercase : jnp.ndarray @classmethod def __UpperCamelCase ( cls , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = scheduler.config if config.trained_betas is not None: UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
UpperCamelCase = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) UpperCamelCase = 1.0 - betas UpperCamelCase = jnp.cumprod(A_ , axis=0 ) return cls( alphas=A_ , betas=A_ , alphas_cumprod=A_ , ) def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = state.alphas_cumprod UpperCamelCase = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase = sqrt_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase = sqrt_one_minus_alpha_prod.flatten() UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def A ( lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
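# --- Illustrative sketch of `betas_for_alpha_bar` above in plain Python:
# each beta_t is chosen so the cumulative alpha product tracks
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2.
import math

def demo_cosine_betas(num_steps, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]

demo_betas = demo_cosine_betas(1000)
assert 0 < demo_betas[0] < demo_betas[-1] <= 0.999  # betas grow over time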
3
1
import numpy

# List of input, output pairs
_UpperCAmelCase : Optional[int] = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
_UpperCAmelCase : List[Any] = (((515, 22, 13), 555), ((61, 35, 49), 150))
_UpperCAmelCase : Union[str, Any] = [2, 4, 1, 5]
_UpperCAmelCase : Tuple = len(train_data)
_UpperCAmelCase : Tuple = 0.009


def A ( lowercase , lowercase="train" ) -> Optional[Any]:
    '''simple docstring'''
    return calculate_hypothesis_value(lowercase , lowercase ) - output(
        lowercase , lowercase )


def A ( lowercase ) -> List[str]:
    '''simple docstring'''
    UpperCamelCase = 0
    for i in range(len(lowercase ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def A ( lowercase , lowercase ) -> Optional[int]:
    '''simple docstring'''
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def A ( lowercase , lowercase ) -> Optional[Any]:
    '''simple docstring'''
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None


def A ( lowercase , lowercase=m ) -> List[Any]:
    '''simple docstring'''
    UpperCamelCase = 0
    for i in range(lowercase ):
        if index == -1:
            summation_value += _error(lowercase )
        else:
            summation_value += _error(lowercase ) * train_data[i][0][index]
    return summation_value


def A ( lowercase ) -> List[str]:
    '''simple docstring'''
    UpperCamelCase = summation_of_cost_derivative(lowercase , lowercase ) / m
    return cost_derivative_value


def A ( ) -> Optional[Any]:
    '''simple docstring'''
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    UpperCamelCase = 0.0_0_0_0_0_2
    UpperCamelCase = 0
    UpperCamelCase = 0
    while True:
        j += 1
        UpperCamelCase = [0, 0, 0, 0]
        for i in range(0 , len(lowercase ) ):
            UpperCamelCase = get_cost_derivative(i - 1 )
            UpperCamelCase = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            lowercase , lowercase , atol=lowercase , rtol=lowercase , ):
            break
        UpperCamelCase = temp_parameter_vector
    print(('Number of iterations:', j) )


def A ( ) -> Any:
    '''simple docstring'''
    for i in range(len(lowercase ) ):
        print(('Actual output value:', output(lowercase , 'test' )) )
        print(('Hypothesis output:', calculate_hypothesis_value(lowercase , 'test' )) )


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
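# --- Worked form of the update rule the script above implements:
# theta_i := theta_i - LEARNING_RATE * (1/m) * sum_j error_j * x_{j,i},
# with x_{j,0} = 1 for the bias term. One-feature sketch on y = 2x:
xs, ys = [1.0, 2.0, 3.0], [2.0, 4.0, 6.0]
theta0 = theta1 = 0.0
for _ in range(2000):
    errs = [theta0 + theta1 * x - y for x, y in zip(xs, ys)]
    theta0 -= 0.1 * sum(errs) / len(xs)
    theta1 -= 0.1 * sum(e * x for e, x in zip(errs, xs)) / len(xs)
assert abs(theta0) < 1e-6 and abs(theta1 - 2.0) < 1e-6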
3
from abc import ABC, abstractmethod from typing import List, Optional class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self ) -> Optional[Any]: """simple docstring""" # test for the above condition self.test() def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = 0 UpperCamelCase = False while not completed: if counter == 1: self.reset() UpperCamelCase = self.advance() if not self.does_advance(A_ ): raise Exception( 'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' ) UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ ) counter += 1 if counter > 10_000: raise Exception('update() does not fulfill the constraint.' ) if self.remaining() != 0: raise Exception('Custom Constraint is not defined correctly.' ) @abstractmethod def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> Any: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> Any: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCamelCase = token_ids UpperCamelCase = len(self.token_ids ) UpperCamelCase = -1 # the index of the currently fulfilled step UpperCamelCase = False def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.fulfilled_idx += 1 UpperCamelCase = True if self.fulfilled_idx == (self.seqlen - 1): UpperCamelCase = True UpperCamelCase = completed else: # failed to make progress. 
UpperCamelCase = True self.reset() return stepped, completed, reset def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = False UpperCamelCase = 0 def __UpperCamelCase ( self ) -> int: """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = PhrasalConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.fulfilled_idx UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ , A_=True ) -> List[Any]: """simple docstring""" UpperCamelCase = max([len(A_ ) for one in nested_token_ids] ) UpperCamelCase = {} for token_ids in nested_token_ids: UpperCamelCase = root for tidx, token_id in enumerate(A_ ): if token_id not in level: UpperCamelCase = {} UpperCamelCase = level[token_id] if no_subsets and self.has_subsets(A_ , A_ ): raise ValueError( 'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is' F''' {nested_token_ids}.''' ) UpperCamelCase = root def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.trie for current_token in current_seq: UpperCamelCase = start[current_token] UpperCamelCase = list(start.keys() ) return next_tokens def __UpperCamelCase ( self , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.next_tokens(A_ ) return len(A_ ) == 0 def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = list(root.values() ) if len(A_ ) == 0: return 1 else: return sum([self.count_leaves(A_ ) for nn in next_nodes] ) def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.count_leaves(A_ ) return len(A_ ) != leaf_count class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> str: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCamelCase = DisjunctiveTrie(A_ ) UpperCamelCase = nested_token_ids UpperCamelCase = self.trie.max_height UpperCamelCase = [] UpperCamelCase = False def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.trie.next_tokens(self.current_seq ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.current_seq.append(A_ ) UpperCamelCase = True else: UpperCamelCase = True self.reset() 
UpperCamelCase = self.trie.reached_leaf(self.current_seq ) UpperCamelCase = completed return stepped, completed, reset def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = False UpperCamelCase = [] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" UpperCamelCase = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.current_seq UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ ) -> Tuple: """simple docstring""" UpperCamelCase = constraints # max # of steps required to fulfill a given constraint UpperCamelCase = max([c.seqlen for c in constraints] ) UpperCamelCase = len(A_ ) UpperCamelCase = False self.init_state() def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = None UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCamelCase = constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) else: UpperCamelCase = self.inprogress_constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Any: """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCamelCase , UpperCamelCase = self.add(A_ ) # the entire list of constraints are fulfilled if self.completed: break def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCamelCase , UpperCamelCase = False, False if self.completed: UpperCamelCase = True UpperCamelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) ) UpperCamelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. 
self.complete_constraints.append(self.inprogress_constraint ) UpperCamelCase = None if len(self.pending_constraints ) == 0: # we're done! UpperCamelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(A_ ): UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ ) if not stepped: raise Exception( '`constraint.update(token_id)` is not yielding incremental progress, ' 'even though `constraint.does_advance(token_id)` is true.' ) if complete: self.complete_constraints.append(A_ ) UpperCamelCase = None if not complete and stepped: UpperCamelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCamelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCamelCase = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __UpperCamelCase ( self , A_=True ) -> Tuple: """simple docstring""" UpperCamelCase = ConstraintListState(self.constraints ) # we actually never touch the self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCamelCase = [ constraint.copy(stateful=A_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ ) UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
3
1
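The gradient-descent cell in this row is easier to follow in vectorized form. Below is a minimal sketch with my own names, assuming the same squared-error cost, the same convergence test, and the first four train_data rows; the learning rate 0.005 is an illustrative choice, not the cell's 0.009:

import numpy as np

def batch_gradient_descent(inputs, outputs, lr=0.005, tol=2e-6, max_iter=100_000):
    # prepend a bias column of ones so theta[0] plays the role of parameter_vector[0]
    m = len(inputs)
    x = np.hstack([np.ones((m, 1)), inputs])
    theta = np.zeros(x.shape[1])
    for step in range(1, max_iter + 1):
        error = x @ theta - outputs   # hypothesis value minus observed output
        grad = x.T @ error / m        # derivative of the mean squared-error cost
        new_theta = theta - lr * grad
        if np.allclose(theta, new_theta, atol=tol, rtol=0):
            break
        theta = new_theta
    print("Number of iterations:", step)
    return theta

x = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1]], dtype=float)
y = np.array([15, 25, 41, 8], dtype=float)
print(batch_gradient_descent(x, y))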
from abc import ABC, abstractmethod from typing import List, Optional class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self ) -> Optional[Any]: """simple docstring""" # test for the above condition self.test() def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = 0 UpperCamelCase = False while not completed: if counter == 1: self.reset() UpperCamelCase = self.advance() if not self.does_advance(A_ ): raise Exception( 'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' ) UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ ) counter += 1 if counter > 10_000: raise Exception('update() does not fulfill the constraint.' ) if self.remaining() != 0: raise Exception('Custom Constraint is not defined correctly.' ) @abstractmethod def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> Any: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self ) -> str: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> Any: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCamelCase = token_ids UpperCamelCase = len(self.token_ids ) UpperCamelCase = -1 # the index of the currently fulfilled step UpperCamelCase = False def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.fulfilled_idx += 1 UpperCamelCase = True if self.fulfilled_idx == (self.seqlen - 1): UpperCamelCase = True UpperCamelCase = completed else: # failed to make progress. 
UpperCamelCase = True self.reset() return stepped, completed, reset def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = False UpperCamelCase = 0 def __UpperCamelCase ( self ) -> int: """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = PhrasalConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.fulfilled_idx UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ , A_=True ) -> List[Any]: """simple docstring""" UpperCamelCase = max([len(A_ ) for one in nested_token_ids] ) UpperCamelCase = {} for token_ids in nested_token_ids: UpperCamelCase = root for tidx, token_id in enumerate(A_ ): if token_id not in level: UpperCamelCase = {} UpperCamelCase = level[token_id] if no_subsets and self.has_subsets(A_ , A_ ): raise ValueError( 'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is' F''' {nested_token_ids}.''' ) UpperCamelCase = root def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.trie for current_token in current_seq: UpperCamelCase = start[current_token] UpperCamelCase = list(start.keys() ) return next_tokens def __UpperCamelCase ( self , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.next_tokens(A_ ) return len(A_ ) == 0 def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" UpperCamelCase = list(root.values() ) if len(A_ ) == 0: return 1 else: return sum([self.count_leaves(A_ ) for nn in next_nodes] ) def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.count_leaves(A_ ) return len(A_ ) != leaf_count class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> str: """simple docstring""" super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCamelCase = DisjunctiveTrie(A_ ) UpperCamelCase = nested_token_ids UpperCamelCase = self.trie.max_height UpperCamelCase = [] UpperCamelCase = False def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.trie.next_tokens(self.current_seq ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False if self.does_advance(A_ ): self.current_seq.append(A_ ) UpperCamelCase = True else: UpperCamelCase = True self.reset() 
UpperCamelCase = self.trie.reached_leaf(self.current_seq ) UpperCamelCase = completed return stepped, completed, reset def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = False UpperCamelCase = [] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __UpperCamelCase ( self , A_=False ) -> int: """simple docstring""" UpperCamelCase = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCamelCase = self.seqlen UpperCamelCase = self.current_seq UpperCamelCase = self.completed return new_constraint class lowercase : def __init__( self , A_ ) -> Tuple: """simple docstring""" UpperCamelCase = constraints # max # of steps required to fulfill a given constraint UpperCamelCase = max([c.seqlen for c in constraints] ) UpperCamelCase = len(A_ ) UpperCamelCase = False self.init_state() def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = None UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints] def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCamelCase = constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) else: UpperCamelCase = self.inprogress_constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) if len(A_ ) == 0: return None else: return token_list def __UpperCamelCase ( self , A_ ) -> Any: """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCamelCase , UpperCamelCase = self.add(A_ ) # the entire list of constraints are fulfilled if self.completed: break def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" if not isinstance(A_ , A_ ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCamelCase , UpperCamelCase = False, False if self.completed: UpperCamelCase = True UpperCamelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) ) UpperCamelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. 
self.complete_constraints.append(self.inprogress_constraint ) UpperCamelCase = None if len(self.pending_constraints ) == 0: # we're done! UpperCamelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(A_ ): UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ ) if not stepped: raise Exception( '`constraint.update(token_id)` is not yielding incremental progress, ' 'even though `constraint.does_advance(token_id)` is true.' ) if complete: self.complete_constraints.append(A_ ) UpperCamelCase = None if not complete and stepped: UpperCamelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCamelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCamelCase = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __UpperCamelCase ( self , A_=True ) -> Tuple: """simple docstring""" UpperCamelCase = ConstraintListState(self.constraints ) # we actually never touch the self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCamelCase = [ constraint.copy(stateful=A_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ ) UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
3
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): @register_to_config def __init__( self , A_ , A_ = None , A_ = None ) -> Any: """simple docstring""" super().__init__() UpperCamelCase = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCamelCase = torch.zeros(A_ , A_ ) else: UpperCamelCase = None UpperCamelCase = torch.nn.Parameter(A_ ) class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : VQModel __lowercase : CLIPTextModel __lowercase : CLIPTokenizer __lowercase : TransformeraDModel __lowercase : LearnedClassifierFreeSamplingEmbeddings __lowercase : VQDiffusionScheduler def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]: """simple docstring""" super().__init__() self.register_modules( vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , ) def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1 # get prompt text embeddings UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) UpperCamelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length] UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate text embeddings for each generation per prompt UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 ) else: UpperCamelCase = [''] * batch_size UpperCamelCase = text_input_ids.shape[-1] UpperCamelCase = self.tokenizer( A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , ) UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase = negative_prompt_embeds.shape[1] UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 ) UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" if isinstance(A_ , A_ ): UpperCamelCase = 1 elif isinstance(A_ , A_ ): UpperCamelCase = len(A_ ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' ) UpperCamelCase = batch_size * num_images_per_prompt UpperCamelCase = guidance_scale > 1.0 UpperCamelCase = self._encode_prompt(A_ , A_ , A_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(A_ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCamelCase = (batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCamelCase = self.transformer.num_vector_embeds - 1 UpperCamelCase = torch.full(A_ , A_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( 'Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,' F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCamelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(A_ , device=self.device ) UpperCamelCase = self.scheduler.timesteps.to(self.device ) UpperCamelCase = latents for i, t in enumerate(self.progress_bar(A_ ) ): # expand the sample if we are doing classifier free guidance UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample if do_classifier_free_guidance: UpperCamelCase , UpperCamelCase = model_output.chunk(2 ) UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ ) UpperCamelCase = self.truncate(A_ , A_ ) # remove `log(0)`'s (`-inf`s) UpperCamelCase = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A_ , A_ , A_ ) UpperCamelCase = self.vqvae.config.vq_embed_dim UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ ) UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ ) def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor: """simple docstring""" UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ ) UpperCamelCase = torch.exp(A_ ) UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ ) UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 ) UpperCamelCase = keep_mask[:, :-1, :] UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) ) UpperCamelCase = log_p_x_0.clone() UpperCamelCase = -torch.inf # -inf = log(0) return rv
3
1
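The disjunctive constraint in this row hinges on a trie over token-id sequences. A readable sketch of that data structure, with illustrative names of my own, looks like this:

class TokenTrie:
    def __init__(self, nested_token_ids):
        # each nested dict level maps a legal next token id to its subtree
        self.root = {}
        self.max_height = max(len(ids) for ids in nested_token_ids)
        for token_ids in nested_token_ids:
            level = self.root
            for token_id in token_ids:
                level = level.setdefault(token_id, {})

    def next_tokens(self, current_seq):
        # walk the prefix, then report which token ids can legally come next
        level = self.root
        for token_id in current_seq:
            level = level[token_id]
        return list(level.keys())

    def reached_leaf(self, current_seq):
        # a leaf has no children, i.e. one of the candidate words is complete
        return len(self.next_tokens(current_seq)) == 0

trie = TokenTrie([[1, 2, 3], [1, 4]])
print(trie.next_tokens([1]))      # [2, 4]
print(trie.reached_leaf([1, 4]))  # True

Because the trie shares prefixes, both the next-token query and the leaf check cost no more than one dict lookup per token already generated.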
import re def A ( lowercase ) -> str: '''simple docstring''' if len(re.findall('[ATCG]' , lowercase ) ) != len(lowercase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
from string import ascii_uppercase _UpperCAmelCase : Dict = {char: i for i, char in enumerate(ascii_uppercase)} _UpperCAmelCase : Tuple = dict(enumerate(ascii_uppercase)) def A ( lowercase , lowercase ) -> str: '''simple docstring''' UpperCamelCase = len(lowercase ) UpperCamelCase = 0 while True: if x == i: UpperCamelCase = 0 if len(lowercase ) == len(lowercase ): break key += key[i] i += 1 return key def A ( lowercase , lowercase ) -> str: '''simple docstring''' UpperCamelCase = '' UpperCamelCase = 0 for letter in message: if letter == " ": cipher_text += " " else: UpperCamelCase = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def A ( lowercase , lowercase ) -> str: '''simple docstring''' UpperCamelCase = '' UpperCamelCase = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: UpperCamelCase = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def A ( ) -> None: '''simple docstring''' UpperCamelCase = 'THE GERMAN ATTACK' UpperCamelCase = 'SECRET' UpperCamelCase = generate_key(lowercase , lowercase ) UpperCamelCase = cipher_text(lowercase , lowercase ) print(f'''Encrypted Text = {s}''' ) print(f'''Original Text = {original_text(lowercase , lowercase )}''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
3
1
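The cipher cell in this row is a Vigenère variant that subtracts the key letter on encryption and adds it back on decryption, so the pair round-trips. Here is a compact sketch with my own names, assuming uppercase letters only (the cell itself passes spaces through untouched):

from string import ascii_uppercase

CHAR_TO_IDX = {c: i for i, c in enumerate(ascii_uppercase)}
IDX_TO_CHAR = dict(enumerate(ascii_uppercase))

def extend_key(key, length):
    # repeat the key until it covers the whole message
    return (key * (length // len(key) + 1))[:length]

def encrypt(message, key):
    full_key = extend_key(key, len(message))
    return "".join(
        IDX_TO_CHAR[(CHAR_TO_IDX[m] - CHAR_TO_IDX[k]) % 26]
        for m, k in zip(message, full_key)
    )

def decrypt(cipher, key):
    full_key = extend_key(key, len(cipher))
    return "".join(
        IDX_TO_CHAR[(CHAR_TO_IDX[c] + CHAR_TO_IDX[k]) % 26]
        for c, k in zip(cipher, full_key)
    )

msg = "THEGERMANATTACK"
print(decrypt(encrypt(msg, "SECRET"), "SECRET") == msg)  # True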
def A ( ) -> str: '''simple docstring''' for n in range(1 , 1_000_000 ): yield n * (n + 1) // 2 def A ( lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = 1 UpperCamelCase = 2 while i * i <= n: UpperCamelCase = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def A ( ) -> Any: '''simple docstring''' return next(i for i in triangle_number_generator() if count_divisors(lowercase ) > 500 ) if __name__ == "__main__": print(solution())
3
from collections.abc import Callable def A ( lowercase , lowercase , lowercase ) -> float: '''simple docstring''' UpperCamelCase = a UpperCamelCase = b if function(lowercase ) == 0: # one of a or b is a root of the function return a elif function(lowercase ) == 0: return b elif ( function(lowercase ) * function(lowercase ) > 0 ): # if neither bound is a root and the function values at both bounds share a sign, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: UpperCamelCase = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until the bracketing interval is narrower than 10^-7 if function(lowercase ) == 0: return mid elif function(lowercase ) * function(lowercase ) < 0: UpperCamelCase = mid else: UpperCamelCase = mid UpperCamelCase = start + (end - start) / 2.0 return mid def A ( lowercase ) -> float: '''simple docstring''' return lowercase**3 - 2 * lowercase - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
3
1
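The divisor-counting trick in this row's first cell is worth spelling out: the divisor count of n is the product of (multiplicity + 1) over its prime factorization. A minimal runnable sketch, names mine:

def count_divisors(n):
    # multiply (multiplicity + 1) over the prime factorization of n
    count, p = 1, 2
    while p * p <= n:
        mult = 0
        while n % p == 0:
            n //= p
            mult += 1
        count *= mult + 1
        p += 1
    if n > 1:
        count *= 2  # the leftover factor is a prime with multiplicity 1
    return count

def first_triangle_with_over(n_divisors):
    # triangle numbers are n(n + 1) / 2, built incrementally
    i, tri = 1, 1
    while count_divisors(tri) <= n_divisors:
        i += 1
        tri += i
    return tri

print(count_divisors(28))          # 6 -> divisors 1, 2, 4, 7, 14, 28
print(first_triangle_with_over(5)) # 28 is the first triangle number with more than 5 divisors

Since n and n + 1 are coprime, the divisor count of n(n + 1) / 2 could also be split into the product of the counts of its two halves, an optimization the cell above does not exploit.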
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_=0.01 , A_=1_000 ) -> str: """simple docstring""" UpperCamelCase = p_stop UpperCamelCase = max_length def __iter__( self ) -> Tuple: """simple docstring""" UpperCamelCase = 0 UpperCamelCase = False while not stop and count < self.max_length: yield count count += 1 UpperCamelCase = random.random() < self.p_stop class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self , A_ , A_ , A_=False , A_=True ) -> List[Any]: """simple docstring""" UpperCamelCase = [ BatchSamplerShard(A_ , 2 , A_ , split_batches=A_ , even_batches=A_ ) for i in range(2 ) ] UpperCamelCase = [list(A_ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(A_ ) for shard in batch_sampler_shards] , [len(A_ ) for e in expected] ) self.assertListEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" # Check the shards when the dataset is a round multiple of total batch size. UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(A_ , A_ ) UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ ) # Expected shouldn't change self.check_batch_sampler_shards(A_ , A_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(A_ , A_ ) UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(A_ , A_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(A_ , A_ ) UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(A_ , A_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(A_ , A_ ) UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(A_ , A_ ) # Check the shards when the dataset is very small. 
UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(A_ , A_ ) UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [[], []] self.check_batch_sampler_shards(A_ , A_ ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" # Check the shards when the dataset is a round multiple of batch size. UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ ) UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ ) # Expected shouldn't change self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ ) # Check the shards when the dataset is not a round multiple of batch size. UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ ) UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ ) UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ ) # Check the shards when the dataset is very small. UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ ) UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [[], []] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" # Check the shards when the dataset is a round multiple of total batch size. UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ ) UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ ) # Expected shouldn't change self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. 
UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ ) UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ ) UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ ) UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ ) # Check the shards when the dataset is very small. UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ ) UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ ) UpperCamelCase = [[], []] self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" # Check the shards when the dataset is a round multiple of batch size. UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ ) UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ ) # Expected shouldn't change self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ ) # Check the shards when the dataset is not a round multiple of batch size. UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ ) UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. 
UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ ) UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ ) # Check the shards when the dataset is very small. UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ ) UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = [[], []] self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] UpperCamelCase = [BatchSamplerShard(A_ , 2 , A_ , even_batches=A_ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_=False , A_=2 , A_=False ) -> Union[str, Any]: """simple docstring""" random.seed(A_ ) UpperCamelCase = list(A_ ) UpperCamelCase = [ IterableDatasetShard( A_ , batch_size=A_ , drop_last=A_ , num_processes=A_ , process_index=A_ , split_batches=A_ , ) for i in range(A_ ) ] UpperCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(A_ ) iterable_dataset_lists.append(list(A_ ) ) UpperCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size UpperCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(A_ ) , len(A_ ) ) self.assertTrue(len(A_ ) % shard_batch_size == 0 ) UpperCamelCase = [] for idx in range(0 , len(A_ ) , A_ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(A_ ) < len(A_ ): reference += reference self.assertListEqual(A_ , reference[: len(A_ )] ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = 42 UpperCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ ) self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ ) self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ ) self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ ) # Edge case with a very small dataset UpperCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ ) self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ ) self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ ) self.check_iterable_dataset_shards(A_ , A_ , batch_size=4 , drop_last=A_ , split_batches=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=A_ ) UpperCamelCase = SkipBatchSampler(A_ , 2 ) self.assertListEqual(list(A_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) UpperCamelCase = skip_first_batches(A_ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(A_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(A_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" Accelerator() UpperCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(A_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(A_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
3
import os _UpperCAmelCase : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000} def A ( lowercase ) -> int: '''simple docstring''' UpperCamelCase = 0 UpperCamelCase = 0 while index < len(lowercase ) - 1: UpperCamelCase = SYMBOLS[numerals[index]] UpperCamelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def A ( lowercase ) -> str: '''simple docstring''' UpperCamelCase = '' UpperCamelCase = num // 1_000 numerals += m_count * "M" num %= 1_000 UpperCamelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 UpperCamelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def A ( lowercase = "/p089_roman.txt" ) -> int: '''simple docstring''' UpperCamelCase = 0 with open(os.path.dirname(lowercase ) + roman_numerals_filename ) as filea: UpperCamelCase = filea.readlines() for line in lines: UpperCamelCase = line.strip() UpperCamelCase = parse_roman_numerals(lowercase ) UpperCamelCase = generate_roman_numerals(lowercase ) savings += len(lowercase ) - len(lowercase ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
3
1
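The Project Euler 89 cell in this row pairs a numeral parser with a minimal-form generator. Here is the same idea as a compact sketch (names mine): subtractive forms are handled by scanning adjacent pairs on the way in, and by a greedy value table on the way out:

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def parse_roman(numerals):
    # a symbol smaller than its right neighbour is subtractive (IV = 4)
    total = 0
    for left, right in zip(numerals, numerals[1:]):
        total += -SYMBOLS[left] if SYMBOLS[left] < SYMBOLS[right] else SYMBOLS[left]
    return total + SYMBOLS[numerals[-1]]

def to_minimal_roman(num):
    # greedy over value/symbol pairs, including the subtractive forms
    pairs = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
             (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"),
             (5, "V"), (4, "IV"), (1, "I")]
    out = []
    for value, symbol in pairs:
        count, num = divmod(num, value)
        out.append(symbol * count)
    return "".join(out)

print(parse_roman("MCMXC"))    # 1990
print(to_minimal_roman(1990))  # MCMXC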
import logging import os import threading import time try: import warnings except ImportError: _UpperCAmelCase : Tuple = None try: import msvcrt except ImportError: _UpperCAmelCase : List[str] = None try: import fcntl except ImportError: _UpperCAmelCase : int = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: _UpperCAmelCase : Optional[int] = OSError # Data # ------------------------------------------------ _UpperCAmelCase : int = [ "Timeout", "BaseFileLock", "WindowsFileLock", "UnixFileLock", "SoftFileLock", "FileLock", ] _UpperCAmelCase : List[str] = "3.0.12" _UpperCAmelCase : str = None def A ( ) -> Dict: '''simple docstring''' global _logger UpperCamelCase = _logger or logging.getLogger(__name__ ) return _logger class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ ) -> Tuple: """simple docstring""" UpperCamelCase = lock_file return None def __str__( self ) -> Any: """simple docstring""" UpperCamelCase = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class lowercase : def __init__( self , A_ ) -> str: """simple docstring""" UpperCamelCase = lock return None def __enter__( self ) -> Optional[int]: """simple docstring""" return self.lock def __exit__( self , A_ , A_ , A_ ) -> List[str]: """simple docstring""" self.lock.release() return None class lowercase : def __init__( self , A_ , A_=-1 , A_=None ) -> int: """simple docstring""" UpperCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long UpperCamelCase = self.hash_filename_if_too_long(A_ , A_ ) # The path to the lock file. UpperCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. UpperCamelCase = None # The default timeout value. UpperCamelCase = timeout # We use this lock primarily for the lock counter. UpperCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. UpperCamelCase = 0 return None @property def __UpperCamelCase ( self ) -> str: """simple docstring""" return self._lock_file @property def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" return self._timeout @timeout.setter def __UpperCamelCase ( self , A_ ) -> int: """simple docstring""" UpperCamelCase = float(A_ ) return None def __UpperCamelCase ( self ) -> str: """simple docstring""" raise NotImplementedError() def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" raise NotImplementedError() @property def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" return self._lock_file_fd is not None def __UpperCamelCase ( self , A_=None , A_=0.05 ) -> List[str]: """simple docstring""" # Use the default timeout, if no timeout is provided. if timeout is None: UpperCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 UpperCamelCase = id(self ) UpperCamelCase = self._lock_file UpperCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(A_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: UpperCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __UpperCamelCase ( self , A_=False ) -> Any: """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: UpperCamelCase = id(self ) UpperCamelCase = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() UpperCamelCase = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self ) -> List[str]: """simple docstring""" self.acquire() return self def __exit__( self , A_ , A_ , A_ ) -> Optional[int]: """simple docstring""" self.release() return None def __del__( self ) -> str: """simple docstring""" self.release(force=A_ ) return None def __UpperCamelCase ( self , A_ , A_ ) -> str: """simple docstring""" UpperCamelCase = os.path.basename(A_ ) if len(A_ ) > max_length and max_length > 0: UpperCamelCase = os.path.dirname(A_ ) UpperCamelCase = str(hash(A_ ) ) UpperCamelCase = filename[: max_length - len(A_ ) - 8] + '...' + hashed_filename + '.lock' return os.path.join(A_ , A_ ) else: return path class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ , A_=-1 , A_=None ) -> List[str]: """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) UpperCamelCase = '\\\\?\\' + relative_to_absolute_path(self.lock_file ) def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: UpperCamelCase = os.open(self._lock_file , A_ ) except OSError: pass else: try: msvcrt.locking(A_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(A_ ) else: UpperCamelCase = fd return None def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self._lock_file_fd UpperCamelCase = None msvcrt.locking(A_ , msvcrt.LK_UNLCK , 1 ) os.close(A_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class lowercase ( _SCREAMING_SNAKE_CASE ): def __init__( self , A_ , A_=-1 , A_=None ) -> Optional[int]: """simple docstring""" UpperCamelCase = os.statvfs(os.path.dirname(A_ ) ).f_namemax super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC UpperCamelCase = os.open(self._lock_file , A_ ) try: fcntl.flock(A_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(A_ ) else: UpperCamelCase = fd return None def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition UpperCamelCase = self._lock_file_fd UpperCamelCase = None fcntl.flock(A_ , fcntl.LOCK_UN ) os.close(A_ ) return None class lowercase ( _SCREAMING_SNAKE_CASE ): def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: UpperCamelCase = os.open(self._lock_file , A_ ) except OSError: pass else: UpperCamelCase = fd return None def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" os.close(self._lock_file_fd ) UpperCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None _UpperCAmelCase : str = None if msvcrt: _UpperCAmelCase : Tuple = WindowsFileLock elif fcntl: _UpperCAmelCase : Any = UnixFileLock else: _UpperCAmelCase : int = SoftFileLock if warnings is not None: warnings.warn("only soft file lock is available")
3
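# Hedged usage sketch for the lock classes above, shown against the standalone
# `filelock` package (same API shape; "demo.lock" is an illustrative path, not
# one used by the code above). It demonstrates the nested-locking counter: the
# OS-level lock is taken on the outermost acquire and released only when the
# counter returns to zero.
from filelock import FileLock, Timeout

lock = FileLock("demo.lock", timeout=0.1)
try:
    with lock:                     # counter -> 1, OS-level lock taken
        with lock:                 # counter -> 2, re-entrant, no extra OS call
            assert lock.is_locked
    assert not lock.is_locked      # counter back to 0, lock released
except Timeout:
    print("demo.lock is held by another process")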
import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] ) def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase ) UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: UpperCamelCase = dataset_size < in_memory_max_size else: UpperCamelCase = False UpperCamelCase = is_small_dataset(lowercase ) assert result == expected
3
1
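# Hedged sketch of the behavior the parametrized test above exercises:
# is_small_dataset() compares a byte size against config.IN_MEMORY_MAX_SIZE,
# and an unset cap (0) or an unknown size (None) is never "small".
import datasets.config
from datasets.utils.info_utils import is_small_dataset

datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20   # illustrative 500 MiB cap
print(is_small_dataset(400 * 2**20))   # True: below the cap
print(is_small_dataset(600 * 2**20))   # False: above the cap
print(is_small_dataset(None))          # False: size unknown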
from __future__ import annotations from typing import Generic, TypeVar _UpperCAmelCase : Union[str, Any] = TypeVar("T") class lowercase ( Generic[T] ): def __init__( self , A_ ) -> None: """simple docstring""" UpperCamelCase = data UpperCamelCase = self UpperCamelCase = 0 class lowercase ( Generic[T] ): def __init__( self ) -> None: """simple docstring""" # map from node name to the node object UpperCamelCase = {} def __UpperCamelCase ( self , A_ ) -> None: """simple docstring""" # create a new set with x as its member UpperCamelCase = DisjointSetTreeNode(A_ ) def __UpperCamelCase ( self , A_ ) -> DisjointSetTreeNode[T]: """simple docstring""" # find the set x belongs to (with path-compression) UpperCamelCase = self.map[data] if elem_ref != elem_ref.parent: UpperCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def __UpperCamelCase ( self , A_ , A_ ) -> None: """simple docstring""" # helper function for union operation if nodea.rank > nodea.rank: UpperCamelCase = nodea else: UpperCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def __UpperCamelCase ( self , A_ , A_ ) -> None: """simple docstring""" # merge 2 disjoint sets self.link(self.find_set(A_ ) , self.find_set(A_ ) ) class lowercase ( Generic[T] ): def __init__( self ) -> None: """simple docstring""" # connections: map from the node to the neighbouring nodes (with weights) UpperCamelCase = {} def __UpperCamelCase ( self , A_ ) -> None: """simple docstring""" # add a node ONLY if it's not present in the graph if node not in self.connections: UpperCamelCase = {} def __UpperCamelCase ( self , A_ , A_ , A_ ) -> None: """simple docstring""" # add an edge with the given weight self.add_node(A_ ) self.add_node(A_ ) UpperCamelCase = weight UpperCamelCase = weight def __UpperCamelCase ( self ) -> GraphUndirectedWeighted[T]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda A_ : A_[2] ) # creating the disjoint set UpperCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(A_ ) # MST generation UpperCamelCase = 0 UpperCamelCase = 0 UpperCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: UpperCamelCase , UpperCamelCase , UpperCamelCase = edges[index] index += 1 UpperCamelCase = disjoint_set.find_set(A_ ) UpperCamelCase = disjoint_set.find_set(A_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(A_ , A_ , A_ ) disjoint_set.union(A_ , A_ ) return graph
3
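# Hedged usage sketch for the Kruskal construction above. The transformation
# collapsed all method names, so this assumes the conventional names
# add_edge() and kruskal() for edge insertion and MST extraction.
graph = GraphUndirectedWeighted[int]()
graph.add_edge(1, 2, 1)
graph.add_edge(2, 3, 2)
graph.add_edge(1, 3, 10)   # heavy edge; the union-find check should reject it
mst = graph.kruskal()
# Expected MST edges: (1, 2, w=1) and (2, 3, w=2), total weight 3.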
def A ( lowercase , lowercase ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b" UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b" UpperCamelCase = max(len(lowercase ) , len(lowercase ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
1
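# Worked examples for the bitwise-XOR helper above (A is its transformed name;
# behavior read directly from the body: XOR the zero-padded binary strings).
print(A(25, 32))   # 0b011001 ^ 0b100000 -> '0b111001'
print(A(5, 5))     # equal inputs cancel  -> '0b000'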
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : List[str] = AudioLDMPipeline __lowercase : Optional[Any] = TEXT_TO_AUDIO_PARAMS __lowercase : Tuple = TEXT_TO_AUDIO_BATCH_PARAMS __lowercase : Dict = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=A_ , ) UpperCamelCase = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=A_ , set_alpha_to_one=A_ , ) torch.manual_seed(0 ) UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , ) UpperCamelCase = ClapTextModelWithProjection(A_ ) UpperCamelCase = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77 ) UpperCamelCase = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=A_ , ) UpperCamelCase = SpeechTaHifiGan(A_ ) UpperCamelCase = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def __UpperCamelCase ( self , A_ , A_=0 ) -> Dict: """simple docstring""" if str(A_ ).startswith('mps' ): UpperCamelCase = torch.manual_seed(A_ ) else: UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase = self.get_dummy_components() UpperCamelCase = AudioLDMPipeline(**A_ ) UpperCamelCase = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = self.get_dummy_inputs(A_ ) 
UpperCamelCase = audioldm_pipe(**A_ ) UpperCamelCase = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 UpperCamelCase = audio[:10] UpperCamelCase = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def __UpperCamelCase ( self ) -> str: """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = AudioLDMPipeline(**A_ ) UpperCamelCase = audioldm_pipe.to(A_ ) UpperCamelCase = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = self.get_dummy_inputs(A_ ) UpperCamelCase = 3 * [inputs['prompt']] # forward UpperCamelCase = audioldm_pipe(**A_ ) UpperCamelCase = output.audios[0] UpperCamelCase = self.get_dummy_inputs(A_ ) UpperCamelCase = 3 * [inputs.pop('prompt' )] UpperCamelCase = audioldm_pipe.tokenizer( A_ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=A_ , return_tensors='pt' , ) UpperCamelCase = text_inputs['input_ids'].to(A_ ) UpperCamelCase = audioldm_pipe.text_encoder( A_ , ) UpperCamelCase = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state UpperCamelCase = F.normalize(A_ , dim=-1 ) UpperCamelCase = prompt_embeds # forward UpperCamelCase = audioldm_pipe(**A_ ) UpperCamelCase = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = AudioLDMPipeline(**A_ ) UpperCamelCase = audioldm_pipe.to(A_ ) UpperCamelCase = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = self.get_dummy_inputs(A_ ) UpperCamelCase = 3 * ['this is a negative prompt'] UpperCamelCase = negative_prompt UpperCamelCase = 3 * [inputs['prompt']] # forward UpperCamelCase = audioldm_pipe(**A_ ) UpperCamelCase = output.audios[0] UpperCamelCase = self.get_dummy_inputs(A_ ) UpperCamelCase = 3 * [inputs.pop('prompt' )] UpperCamelCase = [] for p in [prompt, negative_prompt]: UpperCamelCase = audioldm_pipe.tokenizer( A_ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=A_ , return_tensors='pt' , ) UpperCamelCase = text_inputs['input_ids'].to(A_ ) UpperCamelCase = audioldm_pipe.text_encoder( A_ , ) UpperCamelCase = text_embeds.text_embeds # additional L_2 normalization over each hidden-state UpperCamelCase = F.normalize(A_ , dim=-1 ) embeds.append(A_ ) UpperCamelCase , UpperCamelCase = embeds # forward UpperCamelCase = audioldm_pipe(**A_ ) UpperCamelCase = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase = self.get_dummy_components() UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ ) UpperCamelCase = AudioLDMPipeline(**A_ ) UpperCamelCase = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = self.get_dummy_inputs(A_ ) UpperCamelCase = 'egg cracking' UpperCamelCase = audioldm_pipe(**A_ , negative_prompt=A_ ) UpperCamelCase = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 UpperCamelCase = audio[:10] UpperCamelCase = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = 'cpu' 
# ensure determinism for the device-dependent torch.Generator UpperCamelCase = self.get_dummy_components() UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ ) UpperCamelCase = AudioLDMPipeline(**A_ ) UpperCamelCase = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts UpperCamelCase = 2 UpperCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt UpperCamelCase = 2 UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=2 , num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts UpperCamelCase = 2 UpperCamelCase = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase = self.get_dummy_components() UpperCamelCase = AudioLDMPipeline(**A_ ) UpperCamelCase = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = audioldm_pipe.vocoder.config.sampling_rate UpperCamelCase = self.get_dummy_inputs(A_ ) UpperCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **A_ ) UpperCamelCase = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.016 UpperCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **A_ ) UpperCamelCase = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.032 def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = AudioLDMPipeline(**A_ ) UpperCamelCase = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = ['hey'] UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=1 ) UpperCamelCase = output.audios.shape assert audio_shape == (1, 256) UpperCamelCase = audioldm_pipe.vocoder.config config.model_in_dim *= 2 UpperCamelCase = SpeechTaHifiGan(A_ ).to(A_ ) UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=1 ) UpperCamelCase = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def __UpperCamelCase ( self ) -> str: """simple docstring""" self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self , A_ , A_="cpu" , A_=torch.floataa , A_=0 ) -> List[str]: """simple docstring""" UpperCamelCase = 
torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) UpperCamelCase = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ ) UpperCamelCase = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) UpperCamelCase = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = self.get_inputs(A_ ) UpperCamelCase = 25 UpperCamelCase = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 81_920 UpperCamelCase = audio[77_230:77_240] UpperCamelCase = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) UpperCamelCase = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) UpperCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) UpperCamelCase = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase = self.get_inputs(A_ ) UpperCamelCase = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 81_920 UpperCamelCase = audio[27_780:27_790] UpperCamelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) UpperCamelCase = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
3
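# Hedged sketch of the inference path the tests above exercise, via the public
# diffusers API (the checkpoint name is the one loaded in the slow tests).
import torch
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
audio = pipe(
    "A hammer hitting a wooden surface",
    num_inference_steps=10,
    audio_length_in_s=5.0,
).audios[0]
# `audio` is a 1-D waveform sampled at pipe.vocoder.config.sampling_rate Hz.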
import re def A ( lowercase ) -> str: '''simple docstring''' if len(re.findall('[ATCG]' , lowercase ) ) != len(lowercase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
1
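# Worked examples for the DNA helper above (A is its transformed name; it
# validates the strand with a regex, then maps A<->T and C<->G).
print(A("ATCG"))    # 'TAGC'
print(A("GGTCA"))   # 'CCAGT'
# A("ATXG") raises ValueError('Invalid Strand')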
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : List[Any] = None __lowercase : Any = BloomTokenizerFast __lowercase : List[str] = BloomTokenizerFast __lowercase : Tuple = True __lowercase : Union[str, Any] = False __lowercase : Tuple = "tokenizer_file" __lowercase : Optional[int] = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def __UpperCamelCase ( self ) -> str: """simple docstring""" super().setUp() UpperCamelCase = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self , **A_ ) -> str: """simple docstring""" kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **A_ ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = self.get_rust_tokenizer() UpperCamelCase = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] UpperCamelCase = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]] UpperCamelCase = tokenizer.batch_encode_plus(A_ )['input_ids'] self.assertListEqual(A_ , A_ ) UpperCamelCase = tokenizer.batch_decode(A_ ) self.assertListEqual(A_ , A_ ) def __UpperCamelCase ( self , A_=6 ) -> str: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input UpperCamelCase = 'This is a simple input' UpperCamelCase = ['This is a simple input 1', 'This is a simple input 2'] UpperCamelCase = ('This is a simple input', 'This is a pair') UpperCamelCase = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(A_ , max_length=A_ ) tokenizer_r.encode_plus(A_ , max_length=A_ ) tokenizer_r.batch_encode_plus(A_ , max_length=A_ ) tokenizer_r.encode(A_ , max_length=A_ ) tokenizer_r.batch_encode_plus(A_ , max_length=A_ ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) UpperCamelCase = None # Hotfixing padding = None self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) # Pair input self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.get_rust_tokenizer() UpperCamelCase = load_dataset('xnli' , 'all_languages' , split='test' , streaming=A_ ) UpperCamelCase = next(iter(A_ ) )['premise'] # pick up one data UpperCamelCase = list(sample_data.values() ) UpperCamelCase = list(map(tokenizer.encode , A_ ) ) UpperCamelCase = [tokenizer.decode(A_ , 
clean_up_tokenization_spaces=A_ ) for x in output_tokens] self.assertListEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positional embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
3
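# Hedged sketch of the round-trip the fast-tokenizer test above checks
# ("bigscience/tokenizer" is the checkpoint the test itself downloads).
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tok("The quick brown fox")["input_ids"]
assert tok.decode(ids) == "The quick brown fox"   # byte-level BPE round-trips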
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Dict = (DDPMScheduler,) def __UpperCamelCase ( self , **A_ ) -> Dict: """simple docstring""" UpperCamelCase = { 'num_train_timesteps': 1_000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**A_ ) return config def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=A_ ) def __UpperCamelCase ( self ) -> str: """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=A_ , beta_end=A_ ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=A_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=A_ ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual UpperCamelCase = model(A_ , A_ ) # 2. 
predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(A_ ) ) UpperCamelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' ) UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = len(A_ ) UpperCamelCase = self.dummy_model() UpperCamelCase = self.dummy_sample_deter UpperCamelCase = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual UpperCamelCase = model(A_ , A_ ) # 2. predict previous mean of sample x_t-1 UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase = pred_prev_sample UpperCamelCase = torch.sum(torch.abs(A_ ) ) UpperCamelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=A_ ) UpperCamelCase = scheduler.timesteps for i, timestep in enumerate(A_ ): if i == len(A_ ) - 1: UpperCamelCase = -1 else: UpperCamelCase = timesteps[i + 1] UpperCamelCase = scheduler.previous_timestep(A_ ) UpperCamelCase = prev_t.item() self.assertEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(A_ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [100, 87, 50, 1, 0] UpperCamelCase = len(A_ ) with self.assertRaises(A_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ ) def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.scheduler_classes[0] UpperCamelCase = self.get_scheduler_config() UpperCamelCase = scheduler_class(**A_ ) UpperCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( A_ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=A_ )
3
1
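# Hedged sketch of the reverse-diffusion loop the scheduler tests above walk
# through: predict the noise, then let the scheduler step x_t -> x_{t-1}.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32)      # stand-in for an image/latent

def model(x, t):                        # stand-in for a trained UNet
    return torch.zeros_like(x)

for t in scheduler.timesteps:
    noise_pred = model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample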
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , A_ , A_=3 , A_=32 , A_=3 , A_=10 , A_=[10, 20, 30, 40] , A_=[1, 1, 2, 1] , A_=True , A_=True , A_="relu" , A_=3 , A_=None , ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(A_ ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = self.get_config() return config, pixel_values def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def __UpperCamelCase ( self , A_ , A_ ) -> str: """simple docstring""" UpperCamelCase = FlaxRegNetModel(config=A_ ) UpperCamelCase = model(A_ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __UpperCamelCase ( self , A_ , A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = FlaxRegNetForImageClassification(config=A_ ) UpperCamelCase = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : List[Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () __lowercase : int = False __lowercase : Dict = False __lowercase : Optional[Any] = False def __UpperCamelCase ( self ) -> None: """simple docstring""" UpperCamelCase = FlaxRegNetModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCamelCase ( self ) -> 
Dict: """simple docstring""" return def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @unittest.skip(reason='RegNet does not use inputs_embeds' ) def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='RegNet does not support input and output embeddings' ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" pass def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(A_ ) UpperCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , A_ ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" def check_hidden_states_output(A_ , A_ , A_ ): UpperCamelCase = model_class(A_ ) UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(A_ ) , expected_num_stages + 1 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(A_ , A_ , A_ ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase = self._prepare_for_class(A_ , A_ ) UpperCamelCase = model_class(A_ ) @jax.jit def model_jitted(A_ , **A_ ): return model(pixel_values=A_ , **A_ ) with self.subTest('JIT Enabled' ): UpperCamelCase = model_jitted(**A_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): UpperCamelCase = model_jitted(**A_ ).to_tuple() self.assertEqual(len(A_ ) , len(A_ ) ) for jitted_output, output in zip(A_ , A_ ): self.assertEqual(jitted_output.shape , output.shape ) def A ( ) -> List[str]: '''simple docstring''' UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_flax class lowercase ( unittest.TestCase ): @cached_property def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None @slow def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=A_ , return_tensors='np' ) UpperCamelCase = model(**A_ ) # verify the logits UpperCamelCase = (1, 1_000) self.assertEqual(outputs.logits.shape , A_ ) UpperCamelCase = jnp.array([-0.4180, -1.5051, -3.4836] ) 
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
3
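# Hedged sketch of the slow integration test above, via the public API
# (checkpoint, processor, and fixture path are the ones the test uses).
from PIL import Image
from transformers import AutoImageProcessor, FlaxRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="np")
logits = model(**inputs).logits        # shape (1, 1000) ImageNet logits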
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: _UpperCAmelCase : List[str] = None _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _UpperCAmelCase : List[str] = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } _UpperCAmelCase : Optional[int] = { "camembert-base": 512, } _UpperCAmelCase : Union[str, Any] = "▁" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : str = VOCAB_FILES_NAMES __lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : List[str] = ["input_ids", "attention_mask"] __lowercase : Tuple = CamembertTokenizer def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , ) UpperCamelCase = vocab_file UpperCamelCase = False if not self.vocab_file else True def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
3
1
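# Hedged sketch of the special-token layout built above: <s> A </s> for one
# sequence and <s> A </s></s> B </s> for a pair (checkpoint taken from the
# file's own pretrained map).
from transformers import CamembertTokenizerFast

tok = CamembertTokenizerFast.from_pretrained("camembert-base")
single = tok("bonjour")["input_ids"]
pair = tok("bonjour", "monde")["input_ids"]
assert single[0] == tok.cls_token_id and single[-1] == tok.sep_token_id
assert pair.count(tok.sep_token_id) == 3   # the double </s></s> plus final </s>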