Dataset columns:
- code: string (lengths 81 to 54k)
- code_codestyle: int64 (range 0 to 721)
- style_context: string (lengths 91 to 41.9k)
- style_context_codestyle: int64 (range 0 to 699)
- label: int64 (0 or 1)
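The listing above is the column schema of this dump. As a minimal sketch only — assuming the rows below were exported as JSON Lines with exactly these five columns; the file name `codestyle_rows.jsonl` is a hypothetical placeholder, not part of this dump — the rows could be loaded and inspected like this:

```python
# Minimal sketch, assuming the rows of this dump are serialized as JSON Lines
# with the five columns from the schema above. The file name is a hypothetical
# placeholder, not something provided by this dump.
from datasets import load_dataset

ds = load_dataset("json", data_files="codestyle_rows.jsonl", split="train")

example = ds[0]
print(example["code"][:120])               # obfuscated source code (string, 81-54k chars)
print(example["code_codestyle"])           # style id for `code` (int64, 0-721)
print(example["style_context"][:120])      # paired source snippet (string, 91-41.9k chars)
print(example["style_context_codestyle"])  # style id for `style_context` (int64, 0-699)
print(example["label"])                    # binary label (int64, 0 or 1)
```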
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = get_activation("""swish""" ) self.assertIsInstance(_lowerCAmelCase ,nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 ,dtype=torch.floataa ) ).item() ,0 ) self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = get_activation("""silu""" ) self.assertIsInstance(_lowerCAmelCase ,nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 ,dtype=torch.floataa ) ).item() ,0 ) self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = get_activation("""mish""" ) self.assertIsInstance(_lowerCAmelCase ,nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 ,dtype=torch.floataa ) ).item() ,0 ) self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = get_activation("""gelu""" ) self.assertIsInstance(_lowerCAmelCase ,nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 ,dtype=torch.floataa ) ).item() ,0 ) self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
code_codestyle: 9
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) lowerCamelCase__ = emb.weight.data return lin_layer def A__ ( __lowerCAmelCase : Dict ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] lowerCamelCase__ = mam_aaa["""model"""] remove_ignore_keys_(__lowerCAmelCase ) lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0] lowerCamelCase__ = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""] lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) lowerCamelCase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase : Tuple = parser.parse_args() UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
style_context_codestyle: 9
label: 1
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCamelCase : Optional[Any] = 'src/diffusers' # Matches is_xxx_available() UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') UpperCamelCase : Optional[int] = '\n{0} = None\n' UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None return "_and_".join(__lowerCAmelCase ) def A__ ( ): with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase__ = 0 lowerCamelCase__ = {} # Go through the end of the file while line_index < len(__lowerCAmelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase__ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1: lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = objects else: line_index += 1 return backend_specific_objects def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCAmelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase ) else: return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[int]=None ): if backend_specific_objects is None: lowerCamelCase__ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase__ = {} for backend, objects in backend_specific_objects.items(): lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] ) lowerCamelCase__ = dummy_file return dummy_files def A__ ( __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase__ = {"""torch""": """pt"""} # 
Locate actual dummy modules and read their content. lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" ) lowerCamelCase__ = { backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase__ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase : Any = parser.parse_args() check_dummies(args.fix_and_overwrite)
code_codestyle: 9
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = BlipImageProcessor() lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase 
,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
style_context_codestyle: 9
label: 1
'''simple docstring''' import numpy as np import datasets UpperCamelCase : List[Any] = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' UpperCamelCase : Dict = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' UpperCamelCase : Any = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCamelCase__ (datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """X""": datasets.Sequence(datasets.Value("""float""" ,id="""sequence""" ) ,id="""X""" ), } ) ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): # convert to numpy arrays lowerCamelCase__ = np.array(_lowerCAmelCase ) lowerCamelCase__ = np.array(_lowerCAmelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("""Expected `X` to be a 2D vector""" ) if len(reference_distribution.shape ) != 2: raise ValueError("""Expected `reference_distribution` to be a 2D vector""" ) if reference_distribution.shape[0] < 2: raise ValueError( """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" ) # Get mahalanobis distance for each prediction lowerCamelCase__ = X - np.mean(_lowerCAmelCase ) lowerCamelCase__ = np.cov(reference_distribution.T ) try: lowerCamelCase__ = np.linalg.inv(_lowerCAmelCase ) except np.linalg.LinAlgError: lowerCamelCase__ = np.linalg.pinv(_lowerCAmelCase ) lowerCamelCase__ = np.dot(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = np.dot(_lowerCAmelCase ,X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
code_codestyle: 9
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True) os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True) os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True) def A__ ( __lowerCAmelCase : Union[str, Any] ): if hor == 128: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 64, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) lowerCamelCase__ = model.state_dict() lowerCamelCase__ = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_5536, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = { """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 128, 256), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_5536, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) lowerCamelCase__ = model lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , 
"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
style_context_codestyle: 9
label: 1
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def A__ ( __lowerCAmelCase : str ): lowerCamelCase__ = filter(lambda __lowerCAmelCase : p.requires_grad , model.parameters() ) lowerCamelCase__ = sum([np.prod(p.size() ) for p in model_parameters] ) return params UpperCamelCase : int = logging.getLogger(__name__) def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] ): if metric == "rouge2": lowerCamelCase__ = """{val_avg_rouge2:.4f}-{step_count}""" elif metric == "bleu": lowerCamelCase__ = """{val_avg_bleu:.4f}-{step_count}""" elif metric == "em": lowerCamelCase__ = """{val_avg_em:.4f}-{step_count}""" elif metric == "loss": lowerCamelCase__ = """{val_avg_loss:.4f}-{step_count}""" else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' """ function.""" ) lowerCamelCase__ = ModelCheckpoint( dirpath=__lowerCAmelCase , filename=__lowerCAmelCase , monitor=F'''val_{metric}''' , mode="""max""" , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): return EarlyStopping( monitor=F'''val_{metric}''' , mode="""min""" if """loss""" in metric else """max""" , patience=__lowerCAmelCase , verbose=__lowerCAmelCase , ) class UpperCamelCase__ (pl.Callback ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_lowerCAmelCase ) @rank_zero_only def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=True ): logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) lowerCamelCase__ = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} ) # Log results lowerCamelCase__ = Path(pl_module.hparams.output_dir ) if type_path == "test": lowerCamelCase__ = od / """test_results.txt""" lowerCamelCase__ = od / """test_generations.txt""" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
lowerCamelCase__ = od / F'''{type_path}_results/{trainer.global_step:05d}.txt''' lowerCamelCase__ = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=_lowerCAmelCase ) generations_file.parent.mkdir(exist_ok=_lowerCAmelCase ) with open(_lowerCAmelCase ,"""a+""" ) as writer: for key in sorted(_lowerCAmelCase ): if key in ["log", "progress_bar", "preds"]: continue lowerCamelCase__ = metrics[key] if isinstance(_lowerCAmelCase ,torch.Tensor ): lowerCamelCase__ = val.item() lowerCamelCase__ = F'''{key}: {val:.6f}\n''' writer.write(_lowerCAmelCase ) if not save_generations: return if "preds" in metrics: lowerCamelCase__ = """\n""".join(metrics["""preds"""] ) generations_file.open("""w+""" ).write(_lowerCAmelCase ) @rank_zero_only def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): try: lowerCamelCase__ = pl_module.model.model.num_parameters() except AttributeError: lowerCamelCase__ = pl_module.model.num_parameters() lowerCamelCase__ = count_trainable_parameters(_lowerCAmelCase ) # mp stands for million parameters trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} ) @rank_zero_only def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): save_json(pl_module.metrics ,pl_module.metrics_save_path ) return self._write_logs(_lowerCAmelCase ,_lowerCAmelCase ,"""test""" ) @rank_zero_only def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): save_json(pl_module.metrics ,pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
code_codestyle: 9
'''simple docstring''' import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,): lowerCamelCase__ = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } lowerCamelCase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowerCamelCase__ = token_dict["""token"""] lowerCamelCase__ = Tokenizer(Unigram() ) lowerCamelCase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ), normalizers.Lowercase(), ] ) lowerCamelCase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ), pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ), pre_tokenizers.Punctuation(), ] ) lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,) lowerCamelCase__ = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [files] self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ): lowerCamelCase__ = json.loads(self._tokenizer.to_str() ) lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""] lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
style_context_codestyle: 9
label: 1
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : int ): if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) lowerCamelCase__ = torch.permute(__lowerCAmelCase , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCAmelCase ): # linear layer lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) lowerCamelCase__ = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ): if "metadata" in layer: lowerCamelCase__ = layer.split("""metadata""" ) lowerCamelCase__ = """""".join(split_layer[0] )[:-1] lowerCamelCase__ = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: lowerCamelCase__ = layer.split("""kvstore""" ) lowerCamelCase__ = """""".join(split_layer[0] )[:-1] lowerCamelCase__ = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: lowerCamelCase__ = layer.split("""/""" ) lowerCamelCase__ = """/""".join(split_layer[:-1] ) lowerCamelCase__ = (split_layer[-1],) if "kvstore/path" in layer: lowerCamelCase__ = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}''' elif "kvstore/driver" in layer: lowerCamelCase__ = """file""" else: lowerCamelCase__ = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple ): lowerCamelCase__ = rename_keys(__lowerCAmelCase ) lowerCamelCase__ = {} for k, v in current_block.items(): lowerCamelCase__ = v lowerCamelCase__ = new_current_block torch.save(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = WEIGHTS_NAME ): lowerCamelCase__ = convert_file_size_to_int(__lowerCAmelCase ) lowerCamelCase__ = [] lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = 0 os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: lowerCamelCase__ = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] lowerCamelCase__ = flatten_dict(__lowerCAmelCase , sep="""/""" ) lowerCamelCase__ = {} for layer in checkpoint_info.keys(): lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_key_and_tensorstore_dict( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if curr_real_layer_name in all_layers: lowerCamelCase__ = content else: lowerCamelCase__ = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file lowerCamelCase__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() lowerCamelCase__ = torch.tensor(__lowerCAmelCase ) lowerCamelCase__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small 
conversion scripts lowerCamelCase__ , lowerCamelCase__ = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCAmelCase ) lowerCamelCase__ = """/""".join(__lowerCAmelCase ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: lowerCamelCase__ = os.path.join( __lowerCAmelCase , weights_name.replace(""".bin""" , F'''-{len(__lowerCAmelCase )+1:05d}-of-???.bin''' ) ) rename_and_save_block(__lowerCAmelCase , __lowerCAmelCase ) sharded_state_dicts.append(current_block.keys() ) del current_block lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = raw_weights.to(getattr(__lowerCAmelCase , __lowerCAmelCase ) ) current_block_size += weight_size total_size += weight_size # Add the last block lowerCamelCase__ = os.path.join(__lowerCAmelCase , weights_name.replace(""".bin""" , F'''-{len(__lowerCAmelCase )+1:05d}-of-???.bin''' ) ) rename_and_save_block(__lowerCAmelCase , __lowerCAmelCase ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__lowerCAmelCase ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index lowerCamelCase__ = {} lowerCamelCase__ = {} for idx, shard in enumerate(__lowerCAmelCase ): lowerCamelCase__ = weights_name.replace( """.bin""" , F'''-{idx+1:05d}-of-{len(__lowerCAmelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d} lowerCamelCase__ = os.path.join(__lowerCAmelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) lowerCamelCase__ = shard for key in shard: lowerCamelCase__ = shard_file # Add the metadata lowerCamelCase__ = {"""total_size""": total_size} lowerCamelCase__ = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + """\n""" f.write(__lowerCAmelCase ) return metadata, index if __name__ == "__main__": UpperCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. 
Follows the original Google format.', ) parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size') parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted', type=str, required=False, help='Path to the output pytorch model.', ) UpperCamelCase : Dict = parser.parse_args() shard_on_the_fly( args.switch_t5x_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def A__ ( ): from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer lowerCamelCase__ = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) lowerCamelCase__ = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) lowerCamelCase__ = TaTokenizer.from_pretrained("""t5-small""" ) lowerCamelCase__ = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" lowerCamelCase__ = tokenizer(__lowerCAmelCase , return_tensors="""pt""" ).input_ids lowerCamelCase__ = model.generate(__lowerCAmelCase , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
code_codestyle: 9
'''simple docstring''' from __future__ import annotations import math def A__ ( __lowerCAmelCase : int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True UpperCamelCase : str = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def A__ ( __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) lowerCamelCase__ = [] for num in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = 0 while 2 * i * i <= odd_composites[num]: lowerCamelCase__ = odd_composites[num] - 2 * i * i if is_prime(__lowerCAmelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(__lowerCAmelCase ) == n: return list_nums return [] def A__ ( ): return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
style_context_codestyle: 9
label: 1
'''simple docstring''' import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase ) lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""] lowerCamelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCamelCase__ = {} lowerCamelCase__ = """first_stage_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCamelCase__ = {} lowerCamelCase__ = """model.diffusion_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] lowerCamelCase__ = config.model.params.first_stage_config.params lowerCamelCase__ = config.model.params.unet_config.params lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval() vqvae.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval() unet.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , ) lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) pipeline.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) UpperCamelCase : List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
code_codestyle: 9
'''simple docstring''' def A__ ( ): return [ a * b * (1000 - a - b) for a in range(1 , 999 ) for b in range(__lowerCAmelCase , 999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
style_context_codestyle: 9
label: 1
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def A__ ( ): lowerCamelCase__ = HfArgumentParser(__lowerCAmelCase ) lowerCamelCase__ = parser.parse_args_into_dataclasses()[0] lowerCamelCase__ = TensorFlowBenchmark(args=__lowerCAmelCase ) try: lowerCamelCase__ = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowerCamelCase__ = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" lowerCamelCase__ = """ """.join(str(__lowerCAmelCase ).split(""" """ )[:-1] ) lowerCamelCase__ = """""" lowerCamelCase__ = eval(str(__lowerCAmelCase ).split(""" """ )[-1] ) lowerCamelCase__ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = full_error_msg + begin_error_msg + str(__lowerCAmelCase ) raise ValueError(__lowerCAmelCase ) benchmark.run() if __name__ == "__main__": main()
code_codestyle: 9
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} UpperCamelCase : Dict = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } UpperCamelCase : List[Any] = { 'camembert-base': 5_12, } UpperCamelCase : List[str] = '▁' class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,) lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCAmelCase ) ) lowerCamelCase__ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3} lowerCamelCase__ = len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 
{self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] lowerCamelCase__ = """""" lowerCamelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowerCamelCase__ = True lowerCamelCase__ = [] else: current_sub_tokens.append(_lowerCAmelCase ) lowerCamelCase__ = False out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def __getstate__( self ): lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self ,_lowerCAmelCase ): lowerCamelCase__ = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase ,"""wb""" ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
style_context_codestyle: 9
label: 1
'''simple docstring''' import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=7 ): lowerCamelCase__ = None if token is not None: lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''} # The id of a workflow (not of a workflow run) lowerCamelCase__ = """636036""" lowerCamelCase__ = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' lowerCamelCase__ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).json() return result["workflow_runs"] def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = get_daily_ci_runs(__lowerCAmelCase ) lowerCamelCase__ = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": lowerCamelCase__ = workflow_run["""id"""] break return workflow_run_id def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = get_last_daily_ci_runs(__lowerCAmelCase ) if workflow_run_id is not None: lowerCamelCase__ = get_artifacts_links(worflow_run_id=__lowerCAmelCase , token=__lowerCAmelCase ) for artifact_name in artifact_names: if artifact_name in artifacts_links: lowerCamelCase__ = artifacts_links[artifact_name] download_artifact( artifact_name=__lowerCAmelCase , artifact_url=__lowerCAmelCase , output_dir=__lowerCAmelCase , token=__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): get_last_daily_ci_artifacts(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = {} for artifact_name in artifact_names: lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''{artifact_name}.zip''' ) if os.path.isfile(__lowerCAmelCase ): lowerCamelCase__ = {} with zipfile.ZipFile(__lowerCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(__lowerCAmelCase ): # read the file with z.open(__lowerCAmelCase ) as f: lowerCamelCase__ = f.read().decode("""UTF-8""" ) return results
code_codestyle: 9
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = R"""\w+[.]\d+""" lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase ) for pat in pats: lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) ) return key def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) if ( any("""norm""" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": lowerCamelCase__ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ): # Step 1: Convert pytorch tensor to numpy lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) ) lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) lowerCamelCase__ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ = rename_key(__lowerCAmelCase ) lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) ) # Correctly rename weight parameters lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) return unflatten_dict(__lowerCAmelCase )
style_context_codestyle: 9
label: 1
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable UpperCamelCase : int = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[Any] = ['DPTFeatureExtractor'] UpperCamelCase : Union[str, Any] = ['DPTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'DPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DPTForDepthEstimation', 'DPTForSemanticSegmentation', 'DPTModel', 'DPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 9
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] 
,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """patrickvonplaten/t5-tiny-random""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
9
1
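The benchmark tests above repeatedly build a TensorFlowBenchmarkArguments object and feed it to TensorFlowBenchmark. A minimal standalone sketch of that same pattern, using only the argument names, the tiny model id, and the result fields that actually appear in the tests (anything beyond those would be an assumption):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

# Mirror the flags exercised by the tests: inference only, one tiny model,
# a single (batch_size, sequence_length) combination, no multiprocessing.
args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
# Both result objects are dicts keyed by model name, then batch size and
# sequence length -- the structure check_results_dict_not_empty() walks above.
print(results.time_inference_result)
print(results.memory_inference_result)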
'''simple docstring''' import math def A__ ( __lowerCAmelCase : float , __lowerCAmelCase : float ): if initial_intensity < 0: raise ValueError("""The value of intensity cannot be negative""" ) # handling of negative values of initial intensity if angle < 0 or angle > 360: raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(__lowerCAmelCase ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name='malus_law')
9
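Since malus_law() above is just I = I0 * cos^2(theta), a short worked instance makes the formula concrete (the values here are chosen purely for illustration): at theta = 60 degrees, cos^2(theta) = 0.25, so one quarter of the light passes the analyser.

import math

initial_intensity = 100.0  # arbitrary example value
angle = 60.0               # degrees; cos(60 deg) = 0.5
transmitted = initial_intensity * math.cos(math.radians(angle)) ** 2
print(round(transmitted, 6))  # 25.0 -> a quarter of the initial intensity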
'''simple docstring''' from math import factorial UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def A__ ( __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Convert the number to a string to iterate over its digits and sum their factorials. return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length lowerCamelCase__ = 0 # the cached sizes of the previously computed chains lowerCamelCase__ = {} for start_chain_element in range(1 , __lowerCAmelCase ): # The temporary set will contain the elements of the chain lowerCamelCase__ = set() lowerCamelCase__ = 0 # Stop computing the chain when you find a cached size, a repeating item, or the # length is greater than the desired one. lowerCamelCase__ = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(__lowerCAmelCase ) chain_set_length += 1 lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] lowerCamelCase__ = chain_set_length # If the chain contains exactly the desired number of elements, increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution()}')
9
1
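The chain logic above is easier to see on one concrete run. 69 is the classic Project Euler 74 example: its digit-factorial chain 69 -> 363600 -> 1454 -> 169 -> 363601 loops back to 1454 after five distinct members. A minimal uncached sketch:

from math import factorial

def digit_factorial_sum(n: int) -> int:
    # sum of the factorials of the decimal digits of n
    return sum(factorial(int(d)) for d in str(n))

chain = [69]
nxt = digit_factorial_sum(chain[-1])
while nxt not in chain:
    chain.append(nxt)
    nxt = digit_factorial_sum(nxt)
print(chain)       # [69, 363600, 1454, 169, 363601]
print(len(chain))  # 5 distinct members before the chain starts repeating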
'''simple docstring''' # flake8: noqa # Lint as: python3 UpperCamelCase : Any = [ 'VerificationMode', 'Version', 'disable_progress_bar', 'enable_progress_bar', 'is_progress_bar_enabled', 'experimental', ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
9
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCamelCase : Optional[Any] = 'src/diffusers' # Matches is_xxx_available() UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') UpperCamelCase : Optional[int] = '\n{0} = None\n' UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None return "_and_".join(__lowerCAmelCase ) def A__ ( ): with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase__ = 0 lowerCamelCase__ = {} # Go through the end of the file while line_index < len(__lowerCAmelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase__ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1: lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = objects else: line_index += 1 return backend_specific_objects def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCAmelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase ) else: return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[int]=None ): if backend_specific_objects is None: lowerCamelCase__ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase__ = {} for backend, objects in backend_specific_objects.items(): lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] ) lowerCamelCase__ = dummy_file return dummy_files def A__ ( __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase__ = {"""torch""": """pt"""} # 
Locate actual dummy modules and read their content. lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" ) lowerCamelCase__ = { backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase__ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase : Any = parser.parse_args() check_dummies(args.fix_and_overwrite)
9
1
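The dummy-file machinery above is pure string templating; filling one of the templates by hand shows exactly what lands in a dummy_<backend>_objects.py file. A sketch using an abbreviated copy of the class template (the real one also stubs from_config and from_pretrained; the class name is an arbitrary example):

# Abbreviated copy of the DUMMY_CLASS template used above.
DUMMY_CLASS_SKETCH = (
    "\nclass {0}(metaclass=DummyObject):\n"
    "    _backends = {1}\n\n"
    "    def __init__(self, *args, **kwargs):\n"
    "        requires_backends(self, {1})\n"
)
backend = "torch"
# Same backend-list formatting as create_dummy_files() above.
backend_list = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
print(DUMMY_CLASS_SKETCH.format("UNet2DModel", backend_list))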
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=99 ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=16 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=4 ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_attention_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_choices def UpperCamelCase_ ( self ): lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCamelCase__ = None if self.use_attention_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowerCamelCase__ = RoFormerConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,) return config, input_ids, token_type_ids, attention_mask def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = True _UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def UpperCamelCase_ ( self ): lowerCamelCase__ = FlaxRoFormerModelTester(self ) @slow def UpperCamelCase_ ( self ): for 
model_class_name in self.all_model_classes: lowerCamelCase__ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" ,from_pt=_lowerCAmelCase ) lowerCamelCase__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCAmelCase ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) lowerCamelCase__ = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ = model(_lowerCAmelCase )[0] lowerCamelCase__ = 5_00_00 lowerCamelCase__ = (1, 6, vocab_size) self.assertEqual(output.shape ,_lowerCAmelCase ) lowerCamelCase__ = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] ,_lowerCAmelCase ,atol=1E-4 ) )
9
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = GPTSwaTokenizer _UpperCamelCase = False _UpperCamelCase = True _UpperCamelCase = False def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """This is a test""" lowerCamelCase__ = """This is a test""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = """<s>""" lowerCamelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""j""" ) self.assertEqual(len(_lowerCAmelCase ) ,20_00 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,20_00 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,) # fmt: on lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""] lowerCamelCase__ = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) # Test that decode_fast returns the input text for 
text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
9
1
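The <0x39>, <0xC3> and <0xA9> pieces asserted above are SentencePiece byte fallback at work: characters the vocabulary cannot represent are emitted as their raw UTF-8 bytes. The mapping is easy to verify in plain Python:

# "é" is two bytes in UTF-8, exactly the pair of byte tokens the tokenizer
# produced for "falsé" in the test above.
raw = "é".encode("utf-8")
print([f"<0x{b:02X}>" for b in raw])  # ['<0xC3>', '<0xA9>']
print(f"<0x{ord('9'):02X}>")          # <0x39>, the byte token for the digit 9 in "92000"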
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time UpperCamelCase : int = Lock() def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] ): global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(__lowerCAmelCase ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowerCamelCase__ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowerCamelCase__ = min(__lowerCAmelCase , __lowerCAmelCase ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(__lowerCAmelCase ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowerCamelCase__ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowerCamelCase__ = max(__lowerCAmelCase , __lowerCAmelCase ) # after all swaps are performed, send the values back to main result_pipe[1].send(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = [] lowerCamelCase__ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowerCamelCase__ = Pipe() lowerCamelCase__ = Pipe() process_array_.append( Process( target=__lowerCAmelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowerCamelCase__ = temp_rs lowerCamelCase__ = temp_rr for i in range(1 , len(__lowerCAmelCase ) - 1 ): lowerCamelCase__ = Pipe() lowerCamelCase__ = Pipe() process_array_.append( Process( target=__lowerCAmelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowerCamelCase__ = temp_rs lowerCamelCase__ = temp_rr process_array_.append( Process( target=__lowerCAmelCase , args=( len(__lowerCAmelCase ) - 1, arr[len(__lowerCAmelCase ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(__lowerCAmelCase ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(__lowerCAmelCase ) ): lowerCamelCase__ = result_pipe[p][0].recv() process_array_[p].join() return arr def A__ ( ): lowerCamelCase__ = list(range(10 , 0 , -1 ) ) print("""Initial List""" ) print(*__lowerCAmelCase ) lowerCamelCase__ = odd_even_transposition(__lowerCAmelCase ) print("""Sorted List\n""" ) print(*__lowerCAmelCase ) if __name__ == "__main__": main()
9
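Before the pipes and processes above, it helps to see the algorithm itself: odd-even transposition sort is bubble sort split into n alternating phases, each of which the parallel version assigns to neighbouring processes. A single-process sketch:

def odd_even_transposition_single(arr: list) -> list:
    # n phases guarantee a sorted result; even phases compare pairs
    # (0,1), (2,3), ..., odd phases compare (1,2), (3,4), ...
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

print(odd_even_transposition_single(list(range(10, 0, -1))))  # [1, 2, ..., 10]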
'''simple docstring''' from manim import * class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 ) lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""CPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(1 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""GPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase ) gpu.set_x(gpu.get_x() - 1 ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""Model""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.play( Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,) lowerCamelCase__ = MarkupText( F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,) lowerCamelCase__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase__ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] for i, rect in enumerate(_lowerCAmelCase ): lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 ) cpu_target.move_to(_lowerCAmelCase ) cpu_target.generate_target() lowerCamelCase__ = 0.46 / 4 lowerCamelCase__ = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 ) cpu_targs.append(_lowerCAmelCase ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) ) second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) ) self.play(*_lowerCAmelCase ) self.play(*_lowerCAmelCase ) self.wait()
9
1
'''simple docstring''' import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex UpperCamelCase : Optional[Any] = logging.getLogger(__name__) class UpperCamelCase__ : '''simple docstring''' def __init__( self ): lowerCamelCase__ = False def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): if not self.initialized: lowerCamelCase__ = RagRetriever( _lowerCAmelCase ,question_encoder_tokenizer=_lowerCAmelCase ,generator_tokenizer=_lowerCAmelCase ,index=_lowerCAmelCase ,init_retrieval=_lowerCAmelCase ,) lowerCamelCase__ = True def UpperCamelCase_ ( self ): self.retriever.index.init_index() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ , lowerCamelCase__ = self.retriever._main_retrieve(_lowerCAmelCase ,_lowerCAmelCase ) return doc_ids, retrieved_doc_embeds class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ): if index is not None and index.is_initialized() and len(_lowerCAmelCase ) > 0: raise ValueError( """When using Ray for distributed fine-tuning, """ """you'll need to provide the paths instead, """ """as the dataset and the index are loaded """ """separately. More info in examples/rag/use_own_knowledge_dataset.py """ ) super().__init__( _lowerCAmelCase ,question_encoder_tokenizer=_lowerCAmelCase ,generator_tokenizer=_lowerCAmelCase ,index=_lowerCAmelCase ,init_retrieval=_lowerCAmelCase ,) lowerCamelCase__ = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) for worker in self.retrieval_workers ] ) def UpperCamelCase_ ( self ): logger.info("""initializing retrieval""" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. 
lowerCamelCase__ = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )] lowerCamelCase__ , lowerCamelCase__ = ray.get(random_worker.retrieve.remote(_lowerCAmelCase ,_lowerCAmelCase ) ) else: lowerCamelCase__ , lowerCamelCase__ = self._main_retrieve(_lowerCAmelCase ,_lowerCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_lowerCAmelCase ) @classmethod def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase=None ,**_lowerCAmelCase ): return super(_lowerCAmelCase ,cls ).get_tokenizers(_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ) @classmethod def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,**_lowerCAmelCase ): lowerCamelCase__ = kwargs.pop("""config""" ,_lowerCAmelCase ) or RagConfig.from_pretrained(_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = RagTokenizer.from_pretrained(_lowerCAmelCase ,config=_lowerCAmelCase ) lowerCamelCase__ = rag_tokenizer.question_encoder lowerCamelCase__ = rag_tokenizer.generator if indexed_dataset is not None: lowerCamelCase__ = """custom""" lowerCamelCase__ = CustomHFIndex(config.retrieval_vector_size ,_lowerCAmelCase ) else: lowerCamelCase__ = cls._build_index(_lowerCAmelCase ) return cls( _lowerCAmelCase ,question_encoder_tokenizer=_lowerCAmelCase ,generator_tokenizer=_lowerCAmelCase ,retrieval_workers=_lowerCAmelCase ,index=_lowerCAmelCase ,)
9
'''simple docstring''' UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = 0 while number: # Slightly faster: consume five digits per iteration via the precomputed table. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # Every chain ends in one of two loops: one contains 89 (its member 58 is declared first, # since that minimises the number of iterations needed to check all remaining members), # and the other ends with 1 and contains only the single element 1. # So 58 and 1 are declared at the start. # An array is used instead of a dictionary to speed up lookups. UpperCamelCase : list[bool | None] = [None] * 10_00_00_00 UpperCamelCase : Tuple = True UpperCamelCase : Optional[int] = False def A__ ( __lowerCAmelCase : int ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) ) lowerCamelCase__ = number_chain while number < 1000_0000: lowerCamelCase__ = number_chain number *= 10 return number_chain def A__ ( __lowerCAmelCase : int = 1000_0000 ): for i in range(1 , __lowerCAmelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution() = }')
9
1
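The cached-chain solution above relies on the fact behind Project Euler 92: every squared-digit chain eventually falls into 1 or into the loop containing 89. One concrete chain, computed without any caching:

def next_number(n: int) -> int:
    # sum of the squares of the decimal digits -- the quantity the
    # DIGITS_SQUARED table above precomputes five digits at a time
    return sum(int(d) ** 2 for d in str(n))

n = 44
trace = [n]
while n not in (1, 89):
    n = next_number(n)
    trace.append(n)
print(trace)  # [44, 32, 13, 10, 1]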
'''simple docstring''' def A__ ( __lowerCAmelCase : int = 400_0000 ): lowerCamelCase__ = [0, 1] lowerCamelCase__ = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 lowerCamelCase__ = 0 for j in range(len(__lowerCAmelCase ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(F'{solution() = }')
9
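The solution above materialises the whole Fibonacci list first; the same sum can be kept in O(1) memory by walking the pair (a, b) directly, which makes a handy cross-check:

def even_fib_sum(limit: int) -> int:
    a, b = 0, 1
    total = 0
    while a <= limit:
        if a % 2 == 0:  # every third Fibonacci number is even
            total += a
        a, b = b, a + b
    return total

print(even_fib_sum(4_000_000))  # 4613732, matching solution() above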
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : List[str] = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'donut-swin' _UpperCamelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[3, 6, 12, 24] ,_lowerCAmelCase=7 ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = embed_dim lowerCamelCase__ = depths lowerCamelCase__ = len(_lowerCAmelCase ) lowerCamelCase__ = num_heads lowerCamelCase__ = window_size lowerCamelCase__ = mlp_ratio lowerCamelCase__ = qkv_bias lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = drop_path_rate lowerCamelCase__ = hidden_act lowerCamelCase__ = use_absolute_embeddings lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCamelCase__ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
9
1
'''simple docstring''' def A__ ( __lowerCAmelCase : int | float | str ): try: lowerCamelCase__ = float(__lowerCAmelCase ) except ValueError: raise ValueError("""Please enter a valid number""" ) lowerCamelCase__ = decimal - int(__lowerCAmelCase ) if fractional_part == 0: return int(__lowerCAmelCase ), 1 else: lowerCamelCase__ = len(str(__lowerCAmelCase ).split(""".""" )[1] ) lowerCamelCase__ = int(decimal * (10**number_of_frac_digits) ) lowerCamelCase__ = 10**number_of_frac_digits # Euclid's algorithm: after the loop, divisor holds gcd(numerator, denominator) lowerCamelCase__ , lowerCamelCase__ = denominator, numerator while True: lowerCamelCase__ = dividend % divisor if remainder == 0: break lowerCamelCase__ , lowerCamelCase__ = divisor, remainder lowerCamelCase__ , lowerCamelCase__ = numerator / divisor, denominator / divisor return int(__lowerCAmelCase ), int(__lowerCAmelCase ) if __name__ == "__main__": print(F'{decimal_to_fraction(2) = }') print(F'{decimal_to_fraction(89.0) = }') print(F'{decimal_to_fraction("67") = }') print(F'{decimal_to_fraction("45.0") = }') print(F'{decimal_to_fraction(1.5) = }') print(F'{decimal_to_fraction("6.25") = }') try: print(F'{decimal_to_fraction("78td") = }') except ValueError as err: print(err) # "78td" is not a valid number, so the float() guard raises
9
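The while loop in decimal_to_fraction() above is Euclid's gcd algorithm, so the standard library should reach the same reduced pair -- a compact sanity check:

import math
from fractions import Fraction

decimal = 6.25
digits = len(str(decimal).split(".")[1])  # 2 fractional digits
numerator = int(decimal * 10**digits)     # 625
denominator = 10**digits                  # 100
g = math.gcd(numerator, denominator)      # 25
print(numerator // g, denominator // g)   # 25 4
print(Fraction(str(decimal)))             # 25/4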
'''simple docstring''' import argparse import os import torch from transformers.utils import WEIGHTS_NAME UpperCamelCase : Optional[Any] = ['small', 'medium', 'large'] UpperCamelCase : Dict = 'lm_head.decoder.weight' UpperCamelCase : int = 'lm_head.weight' def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ): lowerCamelCase__ = torch.load(__lowerCAmelCase ) lowerCamelCase__ = d.pop(__lowerCAmelCase ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) UpperCamelCase : Dict = parser.parse_args() for MODEL in DIALOGPT_MODELS: UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl') UpperCamelCase : str = F'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
9
1
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = ['vqvae'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,): super().__init__() self.register_modules(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,mel=_lowerCAmelCase ,vqvae=_lowerCAmelCase ) def UpperCamelCase_ ( self ): return 50 if isinstance(self.scheduler ,_lowerCAmelCase ) else 10_00 @torch.no_grad() def __call__( self ,_lowerCAmelCase = 1 ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = 0 ,_lowerCAmelCase = 0 ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = 0 ,_lowerCAmelCase = 0 ,_lowerCAmelCase = None ,_lowerCAmelCase = 0 ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase=True ,): lowerCamelCase__ = steps or self.get_default_steps() self.scheduler.set_timesteps(_lowerCAmelCase ) lowerCamelCase__ = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: lowerCamelCase__ = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: lowerCamelCase__ = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) ,generator=_lowerCAmelCase ,device=self.device ,) lowerCamelCase__ = noise lowerCamelCase__ = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self.mel.audio_slice_to_image(_lowerCAmelCase ) lowerCamelCase__ = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) lowerCamelCase__ = (input_image / 2_55) * 2 - 1 lowerCamelCase__ = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device ) if self.vqvae is not None: lowerCamelCase__ = self.vqvae.encode(torch.unsqueeze(_lowerCAmelCase ,0 ) ).latent_dist.sample( generator=_lowerCAmelCase )[0] lowerCamelCase__ = self.vqvae.config.scaling_factor * input_images if start_step > 0: lowerCamelCase__ = self.scheduler.add_noise(_lowerCAmelCase ,_lowerCAmelCase ,self.scheduler.timesteps[start_step - 1] ) lowerCamelCase__ = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) lowerCamelCase__ = int(mask_start_secs * pixels_per_second ) lowerCamelCase__ = int(mask_end_secs * pixels_per_second ) lowerCamelCase__ = self.scheduler.add_noise(_lowerCAmelCase ,_lowerCAmelCase ,torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet ,_lowerCAmelCase ): lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )["""sample"""] else: lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase )["""sample"""] if isinstance(self.scheduler ,_lowerCAmelCase ): lowerCamelCase__ = self.scheduler.step( model_output=_lowerCAmelCase ,timestep=_lowerCAmelCase ,sample=_lowerCAmelCase ,eta=_lowerCAmelCase ,generator=_lowerCAmelCase ,)["""prev_sample"""] else: lowerCamelCase__ = self.scheduler.step( model_output=_lowerCAmelCase 
,timestep=_lowerCAmelCase ,sample=_lowerCAmelCase ,generator=_lowerCAmelCase ,)["""prev_sample"""] if mask is not None: if mask_start > 0: lowerCamelCase__ = mask[:, step, :, :mask_start] if mask_end > 0: lowerCamelCase__ = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance lowerCamelCase__ = 1 / self.vqvae.config.scaling_factor * images lowerCamelCase__ = self.vqvae.decode(_lowerCAmelCase )["""sample"""] lowerCamelCase__ = (images / 2 + 0.5).clamp(0 ,1 ) lowerCamelCase__ = images.cpu().permute(0 ,2 ,3 ,1 ).numpy() lowerCamelCase__ = (images * 2_55).round().astype("""uint8""" ) lowerCamelCase__ = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_lowerCAmelCase ,mode="""RGB""" ).convert("""L""" ) for _ in images) ) lowerCamelCase__ = [self.mel.image_to_audio(_lowerCAmelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_lowerCAmelCase )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_lowerCAmelCase ) ) @torch.no_grad() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 50 ): assert isinstance(self.scheduler ,_lowerCAmelCase ) self.scheduler.set_timesteps(_lowerCAmelCase ) lowerCamelCase__ = np.array( [np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) lowerCamelCase__ = (sample / 2_55) * 2 - 1 lowerCamelCase__ = torch.Tensor(_lowerCAmelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ): lowerCamelCase__ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps lowerCamelCase__ = self.scheduler.alphas_cumprod[t] lowerCamelCase__ = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) lowerCamelCase__ = 1 - alpha_prod_t lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase )["""sample"""] lowerCamelCase__ = (1 - alpha_prod_t_prev) ** 0.5 * model_output lowerCamelCase__ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) lowerCamelCase__ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = acos(torch.dot(torch.flatten(_lowerCAmelCase ) ,torch.flatten(_lowerCAmelCase ) ) / torch.norm(_lowerCAmelCase ) / torch.norm(_lowerCAmelCase ) ) return sin((1 - alpha) * theta ) * xa / sin(_lowerCAmelCase ) + sin(alpha * theta ) * xa / sin(_lowerCAmelCase )
9
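The slerp static method at the end of the pipeline above interpolates along the arc between two tensors rather than the straight line, which is the usual motivation for slerping Gaussian noise (the norm stays stable). A standalone sketch of the same formula:

import torch
from math import acos, sin

def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # angle between the flattened tensors, then a sin-weighted combination
    theta = acos(float(torch.dot(x0.flatten(), x1.flatten()) / (x0.norm() * x1.norm())))
    return (sin((1 - alpha) * theta) * x0 + sin(alpha * theta) * x1) / sin(theta)

a, b = torch.randn(8), torch.randn(8)
print(torch.allclose(slerp(a, b, 0.0), a, atol=1e-5))  # True: alpha=0 returns x0
print(torch.allclose(slerp(a, b, 1.0), b, atol=1e-5))  # True: alpha=1 returns x1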
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = mask_ratio lowerCamelCase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase__ = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) lowerCamelCase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 
model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = outputs_dict[0].numpy() lowerCamelCase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCAmelCase ): lowerCamelCase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCAmelCase ): lowerCamelCase__ = v.numpy() else: lowerCamelCase__ = np.array(_lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.constant(_lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ = tf_noise super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase ) } lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase__ = main_layer_class(_lowerCAmelCase ) lowerCamelCase__ = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) ) lowerCamelCase__ = model(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" ) model.save(_lowerCAmelCase ) lowerCamelCase__ = tf.keras.models.load_model( _lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCAmelCase ,tf.keras.Model ) lowerCamelCase__ = model(_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = outputs.last_hidden_state.numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = outputs.logits.numpy() lowerCamelCase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase ) lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = after_outputs["""logits"""].numpy() lowerCamelCase__ = 0 lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase ,1E-5 ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCAmelCase ) lowerCamelCase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase__ = 
model_class.from_config(model.config ) lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase_ ( self ): pass @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ = ViTMAEConfig() lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
9
1
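The tester comment above fixes the expected sequence length at ceil((num_patches + 1) * (1 - mask_ratio)). With typical ViT-MAE base shapes (used here purely as an illustration), the arithmetic works out as follows:

import math

image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2         # 196
mask_ratio = 0.75
seq_length = math.ceil((1 - mask_ratio) * (num_patches + 1))
print(num_patches, seq_length)                        # 196 50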
'''simple docstring'''
import unittest

from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class UpperCamelCase__ (a ,unittest.TestCase ):
    '''simple docstring'''

    _UpperCamelCase = None
    _UpperCamelCase = BloomTokenizerFast
    _UpperCamelCase = BloomTokenizerFast
    _UpperCamelCase = True
    _UpperCamelCase = False
    _UpperCamelCase = 'tokenizer_file'
    _UpperCamelCase = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}

    def UpperCamelCase_ ( self ):
        super().setUp()
        lowerCamelCase__ = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_rust_tokenizer()

        lowerCamelCase__ = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
        lowerCamelCase__ = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
        lowerCamelCase__ = tokenizer.batch_encode_plus(_lowerCAmelCase )["""input_ids"""]
        self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )

        lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase )
        self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase=6 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase ,**_lowerCAmelCase )
                # tokenizer_r.pad_token = None # Hotfixing padding = None

                # Simple input
                lowerCamelCase__ = """This is a simple input"""
                lowerCamelCase__ = ["""This is a simple input 1""", """This is a simple input 2"""]
                lowerCamelCase__ = ("""This is a simple input""", """This is a pair""")
                lowerCamelCase__ = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(_lowerCAmelCase ,max_length=_lowerCAmelCase )
                    tokenizer_r.encode_plus(_lowerCAmelCase ,max_length=_lowerCAmelCase )
                    tokenizer_r.batch_encode_plus(_lowerCAmelCase ,max_length=_lowerCAmelCase )
                    tokenizer_r.encode(_lowerCAmelCase ,max_length=_lowerCAmelCase )
                    tokenizer_r.batch_encode_plus(_lowerCAmelCase ,max_length=_lowerCAmelCase )
                except ValueError:
                    self.fail("""Bloom Tokenizer should be able to deal with padding""" )

                lowerCamelCase__ = None  # Hotfixing padding = None
                self.assertRaises(_lowerCAmelCase ,tokenizer_r.encode ,_lowerCAmelCase ,max_length=_lowerCAmelCase ,padding="""max_length""" )

                # Simple input
                self.assertRaises(_lowerCAmelCase ,tokenizer_r.encode_plus ,_lowerCAmelCase ,max_length=_lowerCAmelCase ,padding="""max_length""" )

                # Simple input
                self.assertRaises(
                    _lowerCAmelCase ,tokenizer_r.batch_encode_plus ,_lowerCAmelCase ,max_length=_lowerCAmelCase ,padding="""max_length""" ,)

                # Pair input
                self.assertRaises(_lowerCAmelCase ,tokenizer_r.encode ,_lowerCAmelCase ,max_length=_lowerCAmelCase ,padding="""max_length""" )

                # Pair input
                self.assertRaises(_lowerCAmelCase ,tokenizer_r.encode_plus ,_lowerCAmelCase ,max_length=_lowerCAmelCase ,padding="""max_length""" )

                # Pair input
                self.assertRaises(
                    _lowerCAmelCase ,tokenizer_r.batch_encode_plus ,_lowerCAmelCase ,max_length=_lowerCAmelCase ,padding="""max_length""" ,)

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_rust_tokenizer()
        lowerCamelCase__ = load_dataset("""xnli""" ,"""all_languages""" ,split="""test""" ,streaming=_lowerCAmelCase )

        lowerCamelCase__ = next(iter(_lowerCAmelCase ) )["""premise"""]  # pick up one data
        lowerCamelCase__ = list(sample_data.values() )

        lowerCamelCase__ = list(map(tokenizer.encode ,_lowerCAmelCase ) )
        lowerCamelCase__ = [tokenizer.decode(_lowerCAmelCase ,clean_up_tokenization_spaces=_lowerCAmelCase ) for x in output_tokens]
        self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
9
'''simple docstring'''
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,):
        lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18}
        lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = image_size
        lowerCamelCase__ = min_resolution
        lowerCamelCase__ = max_resolution
        lowerCamelCase__ = do_resize
        lowerCamelCase__ = size
        lowerCamelCase__ = do_center_crop
        lowerCamelCase__ = crop_size
        lowerCamelCase__ = do_normalize
        lowerCamelCase__ = image_mean
        lowerCamelCase__ = image_std

    def UpperCamelCase_ ( self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class UpperCamelCase__ (a ,unittest.TestCase ):
    '''simple docstring'''

    _UpperCamelCase = LevitImageProcessor if is_vision_available() else None

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = LevitImageProcessingTester(self )

    @property
    def UpperCamelCase_ ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )

        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )

    def UpperCamelCase_ ( self ):
        pass

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,Image.Image )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,np.ndarray )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,torch.Tensor )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
9
1
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union


if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase : str = logging.get_logger(__name__)

UpperCamelCase : List[str] = {
    'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = 'align_text_model'

    def __init__( self ,_lowerCAmelCase=3_05_22 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=0 ,_lowerCAmelCase="absolute" ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
        super().__init__(**_lowerCAmelCase )

        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = type_vocab_size
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = layer_norm_eps
        lowerCamelCase__ = position_embedding_type
        lowerCamelCase__ = use_cache
        lowerCamelCase__ = pad_token_id

    @classmethod
    def UpperCamelCase_ ( cls ,_lowerCAmelCase ,**_lowerCAmelCase ):
        cls._set_token_in_kwargs(_lowerCAmelCase )

        lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(_lowerCAmelCase ,**_lowerCAmelCase )

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("""model_type""" ) == "align":
            lowerCamelCase__ = config_dict["""text_config"""]

        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(_lowerCAmelCase ,**_lowerCAmelCase )


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = 'align_vision_model'

    def __init__( self ,_lowerCAmelCase = 3 ,_lowerCAmelCase = 6_00 ,_lowerCAmelCase = 2.0 ,_lowerCAmelCase = 3.1 ,_lowerCAmelCase = 8 ,_lowerCAmelCase = [3, 3, 5, 3, 5, 5, 3] ,_lowerCAmelCase = [32, 16, 24, 40, 80, 1_12, 1_92] ,_lowerCAmelCase = [16, 24, 40, 80, 1_12, 1_92, 3_20] ,_lowerCAmelCase = [] ,_lowerCAmelCase = [1, 2, 2, 2, 1, 2, 1] ,_lowerCAmelCase = [1, 2, 2, 3, 3, 4, 1] ,_lowerCAmelCase = [1, 6, 6, 6, 6, 6, 6] ,_lowerCAmelCase = 0.25 ,_lowerCAmelCase = "swish" ,_lowerCAmelCase = 25_60 ,_lowerCAmelCase = "mean" ,_lowerCAmelCase = 0.02 ,_lowerCAmelCase = 0.001 ,_lowerCAmelCase = 0.99 ,_lowerCAmelCase = 0.2 ,**_lowerCAmelCase ,):
        super().__init__(**_lowerCAmelCase )

        lowerCamelCase__ = num_channels
        lowerCamelCase__ = image_size
        lowerCamelCase__ = width_coefficient
        lowerCamelCase__ = depth_coefficient
        lowerCamelCase__ = depth_divisor
        lowerCamelCase__ = kernel_sizes
        lowerCamelCase__ = in_channels
        lowerCamelCase__ = out_channels
        lowerCamelCase__ = depthwise_padding
        lowerCamelCase__ = strides
        lowerCamelCase__ = num_block_repeats
        lowerCamelCase__ = expand_ratios
        lowerCamelCase__ = squeeze_expansion_ratio
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dim
        lowerCamelCase__ = pooling_type
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = batch_norm_eps
        lowerCamelCase__ = batch_norm_momentum
        lowerCamelCase__ = drop_connect_rate
        lowerCamelCase__ = sum(_lowerCAmelCase ) * 4

    @classmethod
    def UpperCamelCase_ ( cls ,_lowerCAmelCase ,**_lowerCAmelCase ):
        cls._set_token_in_kwargs(_lowerCAmelCase )

        lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(_lowerCAmelCase ,**_lowerCAmelCase )

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("""model_type""" ) == "align":
            lowerCamelCase__ = config_dict["""vision_config"""]

        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(_lowerCAmelCase ,**_lowerCAmelCase )


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = 'align'
    _UpperCamelCase = True

    def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=6_40 ,_lowerCAmelCase=1.0 ,_lowerCAmelCase=0.02 ,**_lowerCAmelCase ,):
        super().__init__(**_lowerCAmelCase )

        if text_config is None:
            lowerCamelCase__ = {}
            logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )

        if vision_config is None:
            lowerCamelCase__ = {}
            logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )

        lowerCamelCase__ = AlignTextConfig(**_lowerCAmelCase )
        lowerCamelCase__ = AlignVisionConfig(**_lowerCAmelCase )

        lowerCamelCase__ = projection_dim
        lowerCamelCase__ = temperature_init_value
        lowerCamelCase__ = initializer_range

    @classmethod
    def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ):
        return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = copy.deepcopy(self.__dict__ )
        lowerCamelCase__ = self.text_config.to_dict()
        lowerCamelCase__ = self.vision_config.to_dict()
        lowerCamelCase__ = self.__class__.model_type
        return output
9
'''simple docstring'''
import numpy


# List of input, output pairs
UpperCamelCase : List[Any] = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
UpperCamelCase : int = [2, 4, 1, 5]
UpperCamelCase : int = len(train_data)
UpperCamelCase : Dict = 0.009


def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ):
    return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output(
        __lowerCAmelCase , __lowerCAmelCase )


def A__ ( __lowerCAmelCase : Any ):
    lowerCamelCase__ = 0
    for i in range(len(__lowerCAmelCase ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None


def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ):
    lowerCamelCase__ = 0
    for i in range(__lowerCAmelCase ):
        if index == -1:
            summation_value += _error(__lowerCAmelCase )
        else:
            summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index]
    return summation_value


def A__ ( __lowerCAmelCase : List[Any] ):
    lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m
    return cost_derivative_value


def A__ ( ):
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    lowerCamelCase__ = 0.00_0002
    lowerCamelCase__ = 0
    lowerCamelCase__ = 0
    while True:
        j += 1
        lowerCamelCase__ = [0, 0, 0, 0]
        for i in range(0 , len(__lowerCAmelCase ) ):
            lowerCamelCase__ = get_cost_derivative(i - 1 )
            lowerCamelCase__ = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            __lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ):
            break
        lowerCamelCase__ = temp_parameter_vector
    print(("""Number of iterations:""", j) )


def A__ ( ):
    for i in range(len(__lowerCAmelCase ) ):
        print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) )
        print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) )


if __name__ == "__main__":
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
9
1
'''simple docstring'''
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def A__ ( __lowerCAmelCase : List[str] ):
    for param in module.parameters():
        lowerCamelCase__ = False


def A__ ( ):
    lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        lowerCamelCase__ = """mps"""
    if device == "mps":
        print(
            """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
            """ with generations.""" )
    return device


def A__ ( __lowerCAmelCase : List[str] ):
    lowerCamelCase__ = plt.imshow(__lowerCAmelCase )
    fig.axes.get_xaxis().set_visible(__lowerCAmelCase )
    fig.axes.get_yaxis().set_visible(__lowerCAmelCase )
    plt.show()


def A__ ( ):
    lowerCamelCase__ = datetime.now()
    lowerCamelCase__ = current_time.strftime("""%H:%M:%S""" )
    return timestamp
9
'''simple docstring'''
import argparse

import OmegaConf
import torch

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ):
    lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase )
    lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""]
    lowerCamelCase__ = list(state_dict.keys() )

    # extract state_dict for VQVAE
    lowerCamelCase__ = {}
    lowerCamelCase__ = """first_stage_model."""
    for key in keys:
        if key.startswith(__lowerCAmelCase ):
            lowerCamelCase__ = state_dict[key]

    # extract state_dict for UNetLDM
    lowerCamelCase__ = {}
    lowerCamelCase__ = """model.diffusion_model."""
    for key in keys:
        if key.startswith(__lowerCAmelCase ):
            lowerCamelCase__ = state_dict[key]

    lowerCamelCase__ = config.model.params.first_stage_config.params
    lowerCamelCase__ = config.model.params.unet_config.params

    lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval()
    vqvae.load_state_dict(__lowerCAmelCase )

    lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval()
    unet.load_state_dict(__lowerCAmelCase )

    lowerCamelCase__ = DDIMScheduler(
        timesteps=config.model.params.timesteps ,
        beta_schedule="""scaled_linear""" ,
        beta_start=config.model.params.linear_start ,
        beta_end=config.model.params.linear_end ,
        clip_sample=__lowerCAmelCase ,
    )

    lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    pipeline.save_pretrained(__lowerCAmelCase )


if __name__ == "__main__":
    UpperCamelCase : Optional[int] = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    UpperCamelCase : List[Any] = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
9
1
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,*_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
        super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase )
        lowerCamelCase__ = eval_examples
        lowerCamelCase__ = post_process_function

    def UpperCamelCase_ ( self ,_lowerCAmelCase = None ,_lowerCAmelCase=None ,_lowerCAmelCase = None ,_lowerCAmelCase = "eval" ,**_lowerCAmelCase ,):
        lowerCamelCase__ = gen_kwargs.copy()
        lowerCamelCase__ = (
            gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
        )
        lowerCamelCase__ = (
            gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
        )
        lowerCamelCase__ = gen_kwargs

        lowerCamelCase__ = self.eval_dataset if eval_dataset is None else eval_dataset
        lowerCamelCase__ = self.get_eval_dataloader(_lowerCAmelCase )
        lowerCamelCase__ = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        lowerCamelCase__ = self.compute_metrics
        lowerCamelCase__ = None
        lowerCamelCase__ = time.time()
        lowerCamelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowerCamelCase__ = eval_loop(
                _lowerCAmelCase ,description="""Evaluation""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_lowerCAmelCase ,metric_key_prefix=_lowerCAmelCase ,)
        finally:
            lowerCamelCase__ = compute_metrics
        lowerCamelCase__ = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                _lowerCAmelCase ,_lowerCAmelCase ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            lowerCamelCase__ = self.post_process_function(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
            lowerCamelCase__ = self.compute_metrics(_lowerCAmelCase )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    lowerCamelCase__ = metrics.pop(_lowerCAmelCase )
            metrics.update(output.metrics )
        else:
            lowerCamelCase__ = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(_lowerCAmelCase )

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        lowerCamelCase__ = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,_lowerCAmelCase )
        return metrics

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase = "test" ,**_lowerCAmelCase ):
        lowerCamelCase__ = gen_kwargs.copy()

        lowerCamelCase__ = self.get_test_dataloader(_lowerCAmelCase )

        # Temporarily disable metric computation, we will do it in the loop here.
        lowerCamelCase__ = self.compute_metrics
        lowerCamelCase__ = None
        lowerCamelCase__ = time.time()
        lowerCamelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowerCamelCase__ = eval_loop(
                _lowerCAmelCase ,description="""Prediction""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_lowerCAmelCase ,metric_key_prefix=_lowerCAmelCase ,)
        finally:
            lowerCamelCase__ = compute_metrics
        lowerCamelCase__ = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                _lowerCAmelCase ,_lowerCAmelCase ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        lowerCamelCase__ = self.post_process_function(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,"""predict""" )
        lowerCamelCase__ = self.compute_metrics(_lowerCAmelCase )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                lowerCamelCase__ = metrics.pop(_lowerCAmelCase )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=_lowerCAmelCase )
9
'''simple docstring'''
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

UpperCamelCase : str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ):
    lowerCamelCase__ = ""
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42

    for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ):
        lowerCamelCase__ = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(__lowerCAmelCase )

    return decoded


def A__ ( __lowerCAmelCase : list[int] ):
    lowerCamelCase__ = []
    for key in product(__lowerCAmelCase , repeat=3 ):
        lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase )
        if encoded is not None:
            possibles.append(__lowerCAmelCase )
    return possibles


def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ):
    return [possible for possible in possibles if common_word in possible.lower()]


def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ):
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" )
    lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )]

    lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase )
    for common_word in COMMON_WORDS:
        lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase )
        if len(__lowerCAmelCase ) == 1:
            break

    lowerCamelCase__ = possibles[0]
    return sum(ord(__lowerCAmelCase ) for char in decoded_text )


if __name__ == "__main__":
    print(F'{solution() = }')
9
1
'''simple docstring'''
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
        super().__init__(
            features=_lowerCAmelCase ,cache_dir=_lowerCAmelCase ,keep_in_memory=_lowerCAmelCase ,streaming=_lowerCAmelCase ,num_proc=_lowerCAmelCase ,**_lowerCAmelCase ,)
        lowerCamelCase__ = Generator(
            cache_dir=_lowerCAmelCase ,features=_lowerCAmelCase ,generator=_lowerCAmelCase ,gen_kwargs=_lowerCAmelCase ,**_lowerCAmelCase ,)

    def UpperCamelCase_ ( self ):
        # Build iterable dataset
        if self.streaming:
            lowerCamelCase__ = self.builder.as_streaming_dataset(split="""train""" )
        # Build regular (map-style) dataset
        else:
            lowerCamelCase__ = None
            lowerCamelCase__ = None
            lowerCamelCase__ = None
            lowerCamelCase__ = None

            self.builder.download_and_prepare(
                download_config=_lowerCAmelCase ,download_mode=_lowerCAmelCase ,verification_mode=_lowerCAmelCase ,base_path=_lowerCAmelCase ,num_proc=self.num_proc ,)
            lowerCamelCase__ = self.builder.as_dataset(
                split="""train""" ,verification_mode=_lowerCAmelCase ,in_memory=self.keep_in_memory )
        return dataset
9
'''simple docstring'''
import argparse
import struct
import unittest


class UpperCamelCase__ :
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ):
        lowerCamelCase__ = data
        # Initialize hash values
        lowerCamelCase__ = [
            0x6a_09_e6_67,
            0xbb_67_ae_85,
            0x3c_6e_f3_72,
            0xa5_4f_f5_3a,
            0x51_0e_52_7f,
            0x9b_05_68_8c,
            0x1f_83_d9_ab,
            0x5b_e0_cd_19,
        ]
        # Initialize round constants
        lowerCamelCase__ = [
            0x42_8a_2f_98, 0x71_37_44_91, 0xb5_c0_fb_cf, 0xe9_b5_db_a5, 0x39_56_c2_5b, 0x59_f1_11_f1, 0x92_3f_82_a4, 0xab_1c_5e_d5,
            0xd8_07_aa_98, 0x12_83_5b_01, 0x24_31_85_be, 0x55_0c_7d_c3, 0x72_be_5d_74, 0x80_de_b1_fe, 0x9b_dc_06_a7, 0xc1_9b_f1_74,
            0xe4_9b_69_c1, 0xef_be_47_86, 0x0f_c1_9d_c6, 0x24_0c_a1_cc, 0x2d_e9_2c_6f, 0x4a_74_84_aa, 0x5c_b0_a9_dc, 0x76_f9_88_da,
            0x98_3e_51_52, 0xa8_31_c6_6d, 0xb0_03_27_c8, 0xbf_59_7f_c7, 0xc6_e0_0b_f3, 0xd5_a7_91_47, 0x06_ca_63_51, 0x14_29_29_67,
            0x27_b7_0a_85, 0x2e_1b_21_38, 0x4d_2c_6d_fc, 0x53_38_0d_13, 0x65_0a_73_54, 0x76_6a_0a_bb, 0x81_c2_c9_2e, 0x92_72_2c_85,
            0xa2_bf_e8_a1, 0xa8_1a_66_4b, 0xc2_4b_8b_70, 0xc7_6c_51_a3, 0xd1_92_e8_19, 0xd6_99_06_24, 0xf4_0e_35_85, 0x10_6a_a0_70,
            0x19_a4_c1_16, 0x1e_37_6c_08, 0x27_48_77_4c, 0x34_b0_bc_b5, 0x39_1c_0c_b3, 0x4e_d8_aa_4a, 0x5b_9c_ca_4f, 0x68_2e_6f_f3,
            0x74_8f_82_ee, 0x78_a5_63_6f, 0x84_c8_78_14, 0x8c_c7_02_08, 0x90_be_ff_fa, 0xa4_50_6c_eb, 0xbe_f9_a3_f7, 0xc6_71_78_f2,
        ]
        lowerCamelCase__ = self.preprocessing(self.data )
        self.final_hash()

    @staticmethod
    def UpperCamelCase_ ( _lowerCAmelCase ):
        lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64))
        lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) )
        return data + padding + big_endian_integer

    def UpperCamelCase_ ( self ):
        # Convert into blocks of 64 bytes
        lowerCamelCase__ = [
            self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) )
            # add 48 0-ed integers
            words += [0] * 48

            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes

            for index in range(0 ,64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    lowerCamelCase__ = (
                        self.ror(words[index - 15] ,7 )
                        ^ self.ror(words[index - 15] ,18 )
                        ^ (words[index - 15] >> 3)
                    )
                    lowerCamelCase__ = (
                        self.ror(words[index - 2] ,17 )
                        ^ self.ror(words[index - 2] ,19 )
                        ^ (words[index - 2] >> 10)
                    )
                    lowerCamelCase__ = (
                        words[index - 16] + sa + words[index - 7] + sa
                    ) % 0x1_00_00_00_00

                # Compression
                lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 )
                lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
                lowerCamelCase__ = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 )
                lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c)
                lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00

                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((tempa + tempa) % 0x1_00_00_00_00),
                )
            lowerCamelCase__ = [a, b, c, d, e, f, g, h]

            # Modify final values
            lowerCamelCase__ = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
        return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)


class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        import hashlib

        lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" )
        self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() )


def A__ ( ):
    import doctest

    doctest.testmod()

    lowerCamelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" ,
        """--string""" ,
        dest="""input_string""" ,
        default="""Hello World!! Welcome to Cryptography""" ,
        help="""Hash the string""" ,
    )
    parser.add_argument(
        """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    lowerCamelCase__ = parser.parse_args()

    lowerCamelCase__ = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            lowerCamelCase__ = f.read()
    else:
        lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" )

    print(SHAaaa(__lowerCAmelCase ).hash )


if __name__ == "__main__":
    main()
9
1
'''simple docstring'''
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig


UpperCamelCase : Dict = logging.get_logger(__name__)

# General docstring
UpperCamelCase : Dict = 'MobileNetV1Config'

# Base docstring
UpperCamelCase : Tuple = 'google/mobilenet_v1_1.0_224'
UpperCamelCase : Any = [1, 10_24, 7, 7]

# Image classification docstring
UpperCamelCase : List[Any] = 'google/mobilenet_v1_1.0_224'
UpperCamelCase : int = 'tabby, tabby cat'

UpperCamelCase : Optional[Any] = [
    'google/mobilenet_v1_1.0_224',
    'google/mobilenet_v1_0.75_192',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : str=None ):
    lowerCamelCase__ = {}

    if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        lowerCamelCase__ = model.mobilenet_va
    else:
        lowerCamelCase__ = model

    lowerCamelCase__ = """MobilenetV1/Conv2d_0/"""
    lowerCamelCase__ = backbone.conv_stem.convolution.weight
    lowerCamelCase__ = backbone.conv_stem.normalization.bias
    lowerCamelCase__ = backbone.conv_stem.normalization.weight
    lowerCamelCase__ = backbone.conv_stem.normalization.running_mean
    lowerCamelCase__ = backbone.conv_stem.normalization.running_var

    for i in range(13 ):
        lowerCamelCase__ = i + 1
        lowerCamelCase__ = i * 2

        lowerCamelCase__ = backbone.layer[pt_index]
        lowerCamelCase__ = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        lowerCamelCase__ = pointer.convolution.weight
        lowerCamelCase__ = pointer.normalization.bias
        lowerCamelCase__ = pointer.normalization.weight
        lowerCamelCase__ = pointer.normalization.running_mean
        lowerCamelCase__ = pointer.normalization.running_var

        lowerCamelCase__ = backbone.layer[pt_index + 1]
        lowerCamelCase__ = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        lowerCamelCase__ = pointer.convolution.weight
        lowerCamelCase__ = pointer.normalization.bias
        lowerCamelCase__ = pointer.normalization.weight
        lowerCamelCase__ = pointer.normalization.running_mean
        lowerCamelCase__ = pointer.normalization.running_var

    if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        lowerCamelCase__ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
        lowerCamelCase__ = model.classifier.weight
        lowerCamelCase__ = model.classifier.bias

    return tf_to_pt_map


def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
            """https://www.tensorflow.org/install/ for installation instructions.""" )
        raise

    # Load weights from TF model
    lowerCamelCase__ = tf.train.list_variables(__lowerCAmelCase )
    lowerCamelCase__ = {}
    for name, shape in init_vars:
        logger.info(F'''Loading TF weight {name} with shape {shape}''' )
        lowerCamelCase__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = array

    # Build TF to PyTorch weights loading map
    lowerCamelCase__ = _build_tf_to_pytorch_map(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    for name, pointer in tf_to_pt_map.items():
        logger.info(F'''Importing {name}''' )
        if name not in tf_weights:
            logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
            continue

        lowerCamelCase__ = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("""Transposing depthwise""" )
            lowerCamelCase__ = np.transpose(__lowerCAmelCase , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info("""Transposing""" )
            if len(pointer.shape ) == 2:  # copying into linear layer
                lowerCamelCase__ = array.squeeze().transpose()
            else:
                lowerCamelCase__ = np.transpose(__lowerCAmelCase , (3, 2, 0, 1) )

        if pointer.shape != array.shape:
            raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )

        logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
        lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase )

        tf_weights.pop(__lowerCAmelCase , __lowerCAmelCase )
        tf_weights.pop(name + """/RMSProp""" , __lowerCAmelCase )
        tf_weights.pop(name + """/RMSProp_1""" , __lowerCAmelCase )
        tf_weights.pop(name + """/ExponentialMovingAverage""" , __lowerCAmelCase )

    logger.info(F'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
    return model


def A__ ( __lowerCAmelCase : torch.Tensor , __lowerCAmelCase : nn.Convad ):
    lowerCamelCase__ , lowerCamelCase__ = features.shape[-2:]
    lowerCamelCase__ , lowerCamelCase__ = conv_layer.stride
    lowerCamelCase__ , lowerCamelCase__ = conv_layer.kernel_size

    if in_height % stride_height == 0:
        lowerCamelCase__ = max(kernel_height - stride_height , 0 )
    else:
        lowerCamelCase__ = max(kernel_height - (in_height % stride_height) , 0 )

    if in_width % stride_width == 0:
        lowerCamelCase__ = max(kernel_width - stride_width , 0 )
    else:
        lowerCamelCase__ = max(kernel_width - (in_width % stride_width) , 0 )

    lowerCamelCase__ = pad_along_width // 2
    lowerCamelCase__ = pad_along_width - pad_left
    lowerCamelCase__ = pad_along_height // 2
    lowerCamelCase__ = pad_along_height - pad_top

    lowerCamelCase__ = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(__lowerCAmelCase , __lowerCAmelCase , """constant""" , 0.0 )


class UpperCamelCase__ (nn.Module ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 1 ,_lowerCAmelCase = False ,_lowerCAmelCase = True ,_lowerCAmelCase = True ,):
        super().__init__()
        lowerCamelCase__ = config

        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )

        lowerCamelCase__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )

        lowerCamelCase__ = nn.Convad(
            in_channels=_lowerCAmelCase ,out_channels=_lowerCAmelCase ,kernel_size=_lowerCAmelCase ,stride=_lowerCAmelCase ,padding=_lowerCAmelCase ,groups=_lowerCAmelCase ,bias=_lowerCAmelCase ,padding_mode="""zeros""" ,)

        if use_normalization:
            lowerCamelCase__ = nn.BatchNormad(
                num_features=_lowerCAmelCase ,eps=config.layer_norm_eps ,momentum=0.9997 ,affine=_lowerCAmelCase ,track_running_stats=_lowerCAmelCase ,)
        else:
            lowerCamelCase__ = None

        if use_activation:
            if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
                lowerCamelCase__ = ACTaFN[use_activation]
            elif isinstance(config.hidden_act ,_lowerCAmelCase ):
                lowerCamelCase__ = ACTaFN[config.hidden_act]
            else:
                lowerCamelCase__ = config.hidden_act
        else:
            lowerCamelCase__ = None

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if self.config.tf_padding:
            lowerCamelCase__ = apply_tf_padding(_lowerCAmelCase ,self.convolution )
        lowerCamelCase__ = self.convolution(_lowerCAmelCase )
        if self.normalization is not None:
            lowerCamelCase__ = self.normalization(_lowerCAmelCase )
        if self.activation is not None:
            lowerCamelCase__ = self.activation(_lowerCAmelCase )
        return features


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = MobileNetVaConfig
    _UpperCamelCase = load_tf_weights_in_mobilenet_va
    _UpperCamelCase = 'mobilenet_v1'
    _UpperCamelCase = 'pixel_values'
    _UpperCamelCase = False

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if isinstance(_lowerCAmelCase ,(nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(_lowerCAmelCase ,nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )


UpperCamelCase : int = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'

UpperCamelCase : List[str] = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'


@add_start_docstrings(
    'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' ,a ,)
class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = True ):
        super().__init__(_lowerCAmelCase )
        lowerCamelCase__ = config

        lowerCamelCase__ = 32
        lowerCamelCase__ = max(int(depth * config.depth_multiplier ) ,config.min_depth )

        lowerCamelCase__ = MobileNetVaConvLayer(
            _lowerCAmelCase ,in_channels=config.num_channels ,out_channels=_lowerCAmelCase ,kernel_size=3 ,stride=2 ,)

        lowerCamelCase__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        lowerCamelCase__ = nn.ModuleList()
        for i in range(13 ):
            lowerCamelCase__ = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                lowerCamelCase__ = max(int(depth * config.depth_multiplier ) ,config.min_depth )

            self.layer.append(
                MobileNetVaConvLayer(
                    _lowerCAmelCase ,in_channels=_lowerCAmelCase ,out_channels=_lowerCAmelCase ,kernel_size=3 ,stride=strides[i] ,groups=_lowerCAmelCase ,) )

            self.layer.append(
                MobileNetVaConvLayer(
                    _lowerCAmelCase ,in_channels=_lowerCAmelCase ,out_channels=_lowerCAmelCase ,kernel_size=1 ,) )

        lowerCamelCase__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(_lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_lowerCAmelCase ,config_class=_CONFIG_FOR_DOC ,modality="""vision""" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def UpperCamelCase_ ( self ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,):
        lowerCamelCase__ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""" )

        lowerCamelCase__ = self.conv_stem(_lowerCAmelCase )

        lowerCamelCase__ = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer ):
            lowerCamelCase__ = layer_module(_lowerCAmelCase )

            if output_hidden_states:
                lowerCamelCase__ = all_hidden_states + (hidden_states,)

        lowerCamelCase__ = hidden_states

        if self.pooler is not None:
            lowerCamelCase__ = torch.flatten(self.pooler(_lowerCAmelCase ) ,start_dim=1 )
        else:
            lowerCamelCase__ = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_lowerCAmelCase ,pooler_output=_lowerCAmelCase ,hidden_states=_lowerCAmelCase ,)


@add_start_docstrings(
    '\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' ,a ,)
class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ):
        super().__init__(_lowerCAmelCase )

        lowerCamelCase__ = config.num_labels
        lowerCamelCase__ = MobileNetVaModel(_lowerCAmelCase )

        lowerCamelCase__ = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        lowerCamelCase__ = nn.Dropout(config.classifier_dropout_prob ,inplace=_lowerCAmelCase )
        lowerCamelCase__ = nn.Linear(_lowerCAmelCase ,config.num_labels ) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_lowerCAmelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def UpperCamelCase_ ( self ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,):
        lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict

        lowerCamelCase__ = self.mobilenet_va(_lowerCAmelCase ,output_hidden_states=_lowerCAmelCase ,return_dict=_lowerCAmelCase )

        lowerCamelCase__ = outputs.pooler_output if return_dict else outputs[1]

        lowerCamelCase__ = self.classifier(self.dropout(_lowerCAmelCase ) )

        lowerCamelCase__ = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    lowerCamelCase__ = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    lowerCamelCase__ = """single_label_classification"""
                else:
                    lowerCamelCase__ = """multi_label_classification"""

            if self.config.problem_type == "regression":
                lowerCamelCase__ = MSELoss()
                if self.num_labels == 1:
                    lowerCamelCase__ = loss_fct(logits.squeeze() ,labels.squeeze() )
                else:
                    lowerCamelCase__ = loss_fct(_lowerCAmelCase ,_lowerCAmelCase )
            elif self.config.problem_type == "single_label_classification":
                lowerCamelCase__ = CrossEntropyLoss()
                lowerCamelCase__ = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                lowerCamelCase__ = BCEWithLogitsLoss()
                lowerCamelCase__ = loss_fct(_lowerCAmelCase ,_lowerCAmelCase )

        if not return_dict:
            lowerCamelCase__ = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=_lowerCAmelCase ,logits=_lowerCAmelCase ,hidden_states=outputs.hidden_states ,)
9
'''simple docstring'''
import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def A__ ( __lowerCAmelCase : Union[str, Any] ):
    lowerCamelCase__ = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )


def A__ ( __lowerCAmelCase : Tuple ):
    lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape
    lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
    lowerCamelCase__ = emb.weight.data
    return lin_layer


def A__ ( __lowerCAmelCase : Dict ):
    lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
    lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    lowerCamelCase__ = mam_aaa["""model"""]
    remove_ignore_keys_(__lowerCAmelCase )
    lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]

    lowerCamelCase__ = MaMaaaConfig(
        vocab_size=__lowerCAmelCase ,
        max_position_embeddings=1024 ,
        encoder_layers=args.encoder_layers ,
        decoder_layers=args.decoder_layers ,
        encoder_attention_heads=args.encoder_attention_heads ,
        decoder_attention_heads=args.decoder_attention_heads ,
        encoder_ffn_dim=args.encoder_ffn_embed_dim ,
        decoder_ffn_dim=args.decoder_ffn_embed_dim ,
        d_model=args.encoder_embed_dim ,
        encoder_layerdrop=args.encoder_layerdrop ,
        decoder_layerdrop=args.decoder_layerdrop ,
        dropout=args.dropout ,
        attention_dropout=args.attention_dropout ,
        activation_dropout=args.activation_dropout ,
        activation_function="""relu""" ,
    )

    lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""]
    lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase )
    model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
    lowerCamelCase__ = make_linear_from_emb(model.model.shared )
    return model


if __name__ == "__main__":
    UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    UpperCamelCase : Tuple = parser.parse_args()

    UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
9
1
'''simple docstring'''
from __future__ import annotations


def A__ ( __lowerCAmelCase : int = 4 ):
    lowerCamelCase__ = abs(__lowerCAmelCase ) or 4
    return [[1 + x + y * row_size for x in range(__lowerCAmelCase )] for y in range(__lowerCAmelCase )]


def A__ ( __lowerCAmelCase : list[list[int]] ):
    return reverse_row(transpose(__lowerCAmelCase ) )
    # OR.. transpose(reverse_column(matrix))


def A__ ( __lowerCAmelCase : list[list[int]] ):
    return reverse_row(reverse_column(__lowerCAmelCase ) )
    # OR.. reverse_column(reverse_row(matrix))


def A__ ( __lowerCAmelCase : list[list[int]] ):
    return reverse_column(transpose(__lowerCAmelCase ) )
    # OR.. transpose(reverse_row(matrix))


def A__ ( __lowerCAmelCase : list[list[int]] ):
    lowerCamelCase__ = [list(__lowerCAmelCase ) for x in zip(*__lowerCAmelCase )]
    return matrix


def A__ ( __lowerCAmelCase : list[list[int]] ):
    lowerCamelCase__ = matrix[::-1]
    return matrix


def A__ ( __lowerCAmelCase : list[list[int]] ):
    lowerCamelCase__ = [x[::-1] for x in matrix]
    return matrix


def A__ ( __lowerCAmelCase : list[list[int]] ):
    for i in matrix:
        print(*__lowerCAmelCase )


if __name__ == "__main__":
    UpperCamelCase : Any = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_aa(matrix))

    UpperCamelCase : int = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_aaa(matrix))

    UpperCamelCase : Dict = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_aaa(matrix))
9
'''simple docstring'''
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = tempfile.mkdtemp()

        lowerCamelCase__ = BlipImageProcessor()
        lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )

        lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase )

        processor.save_pretrained(self.tmpdirname )

    def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
        return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer

    def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
        return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor

    def UpperCamelCase_ ( self ):
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
        lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
        lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 )

        lowerCamelCase__ = BlipProcessor.from_pretrained(
            self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase )

        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()

        lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )

        lowerCamelCase__ = self.prepare_image_inputs()

        lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" )
        lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()

        lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )

        lowerCamelCase__ = """lower newer"""

        lowerCamelCase__ = processor(text=_lowerCAmelCase )

        lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()

        lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )

        lowerCamelCase__ = """lower newer"""
        lowerCamelCase__ = self.prepare_image_inputs()

        lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )

        self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )

        # test if it raises when no input is passed
        with pytest.raises(_lowerCAmelCase ):
            processor()

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()

        lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )

        lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase )
        lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase )

        self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()

        lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )

        lowerCamelCase__ = """lower newer"""
        lowerCamelCase__ = self.prepare_image_inputs()

        lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
9
1
'''simple docstring'''
from collections import defaultdict


def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
    lowerCamelCase__ = first_str.lower().strip()
    lowerCamelCase__ = second_str.lower().strip()

    # Remove whitespace
    lowerCamelCase__ = first_str.replace(""" """ , """""" )
    lowerCamelCase__ = second_str.replace(""" """ , """""" )

    # Strings of different lengths are not anagrams
    if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
        return False

    # Default values for count should be 0
    lowerCamelCase__ = defaultdict(__lowerCAmelCase )

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(__lowerCAmelCase ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values() )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    UpperCamelCase : List[Any] = input('Enter the first string ').strip()
    UpperCamelCase : List[str] = input('Enter the second string ').strip()

    UpperCamelCase : Union[str, Any] = check_anagrams(input_a, input_b)
    print(F'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
9
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True) os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True) os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True) def A__ ( __lowerCAmelCase : Union[str, Any] ): if hor == 128: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 64, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) lowerCamelCase__ = model.state_dict() lowerCamelCase__ = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_5536, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = { """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 128, 256), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_5536, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) lowerCamelCase__ = model lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , 
"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
9
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : List[Any] = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'SCUT-DLVCLab/lilt-roberta-en-base': ( 'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json' ), } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'lilt' def __init__( self ,_lowerCAmelCase=3_05_22 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=0 ,_lowerCAmelCase="absolute" ,_lowerCAmelCase=None ,_lowerCAmelCase=4 ,_lowerCAmelCase=10_24 ,**_lowerCAmelCase ,): super().__init__(pad_token_id=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = hidden_act lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = position_embedding_type lowerCamelCase__ = classifier_dropout lowerCamelCase__ = channel_shrink_ratio lowerCamelCase__ = max_ad_position_embeddings
9
'''simple docstring''' import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,): lowerCamelCase__ = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } lowerCamelCase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowerCamelCase__ = token_dict["""token"""] lowerCamelCase__ = Tokenizer(Unigram() ) lowerCamelCase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ), normalizers.Lowercase(), ] ) lowerCamelCase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ), pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ), pre_tokenizers.Punctuation(), ] ) lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,) lowerCamelCase__ = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [files] self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ): lowerCamelCase__ = json.loads(self._tokenizer.to_str() ) lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""] lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
9
1
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=3 ,_lowerCAmelCase=32 ,_lowerCAmelCase=3 ,_lowerCAmelCase=10 ,_lowerCAmelCase=[10, 20, 30, 40] ,_lowerCAmelCase=[1, 1, 2, 1] ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase="relu" ,_lowerCAmelCase=3 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = num_channels lowerCamelCase__ = embeddings_size lowerCamelCase__ = hidden_sizes lowerCamelCase__ = depths lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_act lowerCamelCase__ = num_labels lowerCamelCase__ = scope lowerCamelCase__ = len(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_labels ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ResNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFResNetModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = TFResNetForImageClassification(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCamelCase = ( {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): 
lowerCamelCase__ = TFResNetModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): def check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ = self.model_tester.num_stages self.assertEqual(len(_lowerCAmelCase ) ,expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase__ = layer_type lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFResNetModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = 
TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_lowerCAmelCase ,atol=1E-4 ) )
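The integration test above pins the first three logits and compares them with an absolute tolerance; a tiny standalone illustration of that pattern (the "actual" values are fabricated for the demo):

import numpy as np

expected = np.array([-11.1069, -9.7877, -8.3777])
actual = expected + 5e-5  # pretend model output, within atol
assert np.allclose(actual, expected, atol=1e-4)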
9
'''simple docstring''' from __future__ import annotations import math def A__ ( __lowerCAmelCase : int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True UpperCamelCase : str = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def A__ ( __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) lowerCamelCase__ = [] for num in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = 0 while 2 * i * i <= odd_composites[num]: lowerCamelCase__ = odd_composites[num] - 2 * i * i if is_prime(__lowerCAmelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(__lowerCAmelCase ) == n: return list_nums return [] def A__ ( ): return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
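Worked checks of the decomposition the search above tests (Goldbach's "other" conjecture): every odd composite should be a prime plus twice a square, until the first counterexample is found:

assert 9 == 7 + 2 * 1**2
assert 15 == 7 + 2 * 2**2
assert 33 == 31 + 2 * 1**2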
9
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase : Optional[Any] = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMAEForPreTraining', 'ViTMAELayer', 'ViTMAEModel', 'ViTMAEPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'TFViTMAEForPreTraining', 'TFViTMAEModel', 'TFViTMAEPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
'''simple docstring''' def A__ ( ): return [ a * b * (1000 - a - b) for a in range(1 , 999 ) for b in range(__lowerCAmelCase , 999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
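The comprehension above finds the Pythagorean triplet with a + b + c == 1000 (Project Euler 9); a worked check of the known answer:

a, b, c = 200, 375, 425
assert a + b + c == 1000 and a * a + b * b == c * c
assert a * b * c == 31_875_000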
9
1
'''simple docstring''' import math def A__ ( __lowerCAmelCase : int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A__ ( __lowerCAmelCase : int = 1_0001 ): try: lowerCamelCase__ = int(__lowerCAmelCase ) except (TypeError, ValueError): raise TypeError("""Parameter nth must be int or castable to int.""" ) from None if nth <= 0: raise ValueError("""Parameter nth must be greater than or equal to one.""" ) lowerCamelCase__ = [] lowerCamelCase__ = 2 while len(__lowerCAmelCase ) < nth: if is_prime(__lowerCAmelCase ): primes.append(__lowerCAmelCase ) num += 1 else: num += 1 return primes[len(__lowerCAmelCase ) - 1] if __name__ == "__main__": print(F'{solution() = }')
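Spot checks for the 6k +/- 1 trial division above, using a readable local copy of the primality test (the snippet's functions are all renamed to A__ by the corpus obfuscation, so is_prime here is my name):

import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]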
9
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} UpperCamelCase : Dict = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } UpperCamelCase : List[Any] = { 'camembert-base': 5_12, } UpperCamelCase : List[str] = '▁' class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,) lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCAmelCase ) ) lowerCamelCase__ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3} lowerCamelCase__ = len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 
{self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] lowerCamelCase__ = """""" lowerCamelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowerCamelCase__ = True lowerCamelCase__ = [] else: current_sub_tokens.append(_lowerCAmelCase ) lowerCamelCase__ = False out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def __getstate__( self ): lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self ,_lowerCAmelCase ): lowerCamelCase__ = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase ,"""wb""" ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
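A sketch of the fairseq-offset bookkeeping the tokenizer above performs: four control tokens occupy ids 0-3, so every SentencePiece id is shifted by that offset (the piece id below is made up):

fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)  # 4
sp_piece_id = 10  # hypothetical SentencePiece id
assert fairseq_offset + sp_piece_id == 14  # id in the merged vocabulary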
9
1
'''simple docstring''' from __future__ import annotations def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ): if partitions <= 0: raise ValueError("""partitions must be a positive number!""" ) if partitions > number_of_bytes: raise ValueError("""partitions can not > number_of_bytes!""" ) lowerCamelCase__ = number_of_bytes // partitions lowerCamelCase__ = [] for i in range(__lowerCAmelCase ): lowerCamelCase__ = i * bytes_per_partition + 1 lowerCamelCase__ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(F'''{start_bytes}-{end_bytes}''' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
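A worked run of the allocator above for 16 bytes over 4 partitions (inlined here because the corpus renames the helper to A__):

number_of_bytes, partitions = 16, 4
bytes_per_partition = number_of_bytes // partitions
allocation = []
for i in range(partitions):
    start = i * bytes_per_partition + 1
    end = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
    allocation.append(f"{start}-{end}")
assert allocation == ["1-4", "5-8", "9-12", "13-16"]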
9
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = R"""\w+[.]\d+""" lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase ) for pat in pats: lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) ) return key def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) if ( any("""norm""" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": lowerCamelCase__ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ): # Step 1: Convert pytorch tensor to numpy lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) ) lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) lowerCamelCase__ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ = rename_key(__lowerCAmelCase ) lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) ) # Correctly rename weight parameters lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) return unflatten_dict(__lowerCAmelCase )
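The two reshape rules the converter above applies, demonstrated on dummy arrays: conv kernels go from PyTorch's (out, in, h, w) layout to Flax's (h, w, in, out), and linear weights are transposed:

import numpy as np

conv_pt = np.zeros((8, 3, 5, 5))   # (out, in, h, w)
assert conv_pt.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)
linear_pt = np.zeros((16, 32))     # (out_features, in_features)
assert linear_pt.T.shape == (32, 16)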
9
1
'''simple docstring''' def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): return [sentence[i : i + ngram_size] for i in range(len(__lowerCAmelCase ) - ngram_size + 1 )] if __name__ == "__main__": from doctest import testmod testmod()
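The helper above slices the raw string rather than word tokens, so with ngram_size=3 it yields character trigrams:

sentence, n = "abcde", 3
assert [sentence[i : i + n] for i in range(len(sentence) - n + 1)] == ["abc", "bcd", "cde"]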
9
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] 
,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """patrickvonplaten/t5-tiny-random""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
9
1
'''simple docstring''' import warnings from .generation import TFGenerationMixin class UpperCamelCase__ (a ): '''simple docstring''' warnings.warn( 'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ' 'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' ,a ,)
9
'''simple docstring''' from math import factorial UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def A__ ( __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length lowerCamelCase__ = 0 # the cached sizes of the previous chains lowerCamelCase__ = {} for start_chain_element in range(1 , __lowerCAmelCase ): # The temporary set will contain the elements of the chain lowerCamelCase__ = set() lowerCamelCase__ = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. lowerCamelCase__ = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(__lowerCAmelCase ) chain_set_length += 1 lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] lowerCamelCase__ = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution()}')
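A direct check of the loop 169 -> 363601 -> 1454 -> 169 that makes caching worthwhile in the chain counter above (the function name is a readable stand-in for the renamed helper):

from math import factorial

def digit_factorial_sum(n: int) -> int:
    return sum(factorial(int(d)) for d in str(n))

assert digit_factorial_sum(169) == 363_601
assert digit_factorial_sum(363_601) == 1_454
assert digit_factorial_sum(1_454) == 169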
9
1
'''simple docstring''' UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' UpperCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}] UpperCamelCase : List[Any] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
9
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCamelCase : Optional[Any] = 'src/diffusers' # Matches is_xxx_available() UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') UpperCamelCase : Optional[int] = '\n{0} = None\n' UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None return "_and_".join(__lowerCAmelCase ) def A__ ( ): with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase__ = 0 lowerCamelCase__ = {} # Go through the end of the file while line_index < len(__lowerCAmelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase__ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1: lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = objects else: line_index += 1 return backend_specific_objects def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCAmelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase ) else: return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[int]=None ): if backend_specific_objects is None: lowerCamelCase__ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase__ = {} for backend, objects in backend_specific_objects.items(): lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] ) lowerCamelCase__ = dummy_file return dummy_files def A__ ( __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase__ = {"""torch""": """pt"""} # 
Locate actual dummy modules and read their content. lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" ) lowerCamelCase__ = { backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase__ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase : Any = parser.parse_args() check_dummies(args.fix_and_overwrite)
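A sketch of what the class template in the script above expands to for a torch-only object; the class name is illustrative and the template is retyped here since the script's constants are renamed by the corpus:

template = (
    "class {0}(metaclass=DummyObject):\n"
    "    _backends = {1}\n\n"
    "    def __init__(self, *args, **kwargs):\n"
    "        requires_backends(self, {1})\n"
)
print(template.format("UNet2DModel", '["torch"]'))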
9
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = ['image_processor', 'tokenizer'] _UpperCamelCase = 'LayoutLMv3ImageProcessor' _UpperCamelCase = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ): lowerCamelCase__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_lowerCAmelCase ,) lowerCamelCase__ = kwargs.pop("""feature_extractor""" ) lowerCamelCase__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = True ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = 0 ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) # first, apply the image processor lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,return_tensors=_lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension) lowerCamelCase__ = features["""words"""] lowerCamelCase__ = self.tokenizer( text=text if text is not None else features["""words"""] ,text_pair=text_pair if text_pair is not None else None ,boxes=boxes if boxes is not None else features["""boxes"""] ,word_labels=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,padding=_lowerCAmelCase ,truncation=_lowerCAmelCase ,max_length=_lowerCAmelCase ,stride=_lowerCAmelCase ,pad_to_multiple_of=_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ,return_attention_mask=_lowerCAmelCase ,return_overflowing_tokens=_lowerCAmelCase ,return_special_tokens_mask=_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,return_length=_lowerCAmelCase ,verbose=_lowerCAmelCase ,return_tensors=_lowerCAmelCase ,**_lowerCAmelCase ,) # add pixel values lowerCamelCase__ = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: lowerCamelCase__ = self.get_overflowing_images(_lowerCAmelCase ,encoded_inputs["""overflow_to_sample_mapping"""] ) lowerCamelCase__ = images return encoded_inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): # in 
case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image lowerCamelCase__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(_lowerCAmelCase ) != len(_lowerCAmelCase ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" F''' {len(_lowerCAmelCase )} and {len(_lowerCAmelCase )}''' ) return images_with_overflow def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase ,**_lowerCAmelCase ) @property def UpperCamelCase_ ( self ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def UpperCamelCase_ ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_lowerCAmelCase ,) return self.image_processor_class @property def UpperCamelCase_ ( self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_lowerCAmelCase ,) return self.image_processor
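A sketch of the overflow handling above: when one document overflows into several tokenizer windows, its image is repeated once per window so each window keeps a matching pixel-values entry:

images = ["img0", "img1"]
overflow_to_sample_mapping = [0, 0, 1]  # sample 0 produced two windows
images_with_overflow = [images[i] for i in overflow_to_sample_mapping]
assert images_with_overflow == ["img0", "img0", "img1"]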
9
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = GPTSwaTokenizer _UpperCamelCase = False _UpperCamelCase = True _UpperCamelCase = False def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """This is a test""" lowerCamelCase__ = """This is a test""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = """<s>""" lowerCamelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""j""" ) self.assertEqual(len(_lowerCAmelCase ) ,20_00 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,20_00 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,) # fmt: on lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""] lowerCamelCase__ = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) # Test that decode_fast returns the input text for 
text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
9
1
'''simple docstring''' from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def A__ ( __lowerCAmelCase : List[Any] ): return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def A__ ( ): lowerCamelCase__ = ArgumentParser( """HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=__lowerCAmelCase ) lowerCamelCase__ = parser.add_subparsers(help="""datasets-cli command helpers""" ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(__lowerCAmelCase ) EnvironmentCommand.register_subcommand(__lowerCAmelCase ) TestCommand.register_subcommand(__lowerCAmelCase ) RunBeamCommand.register_subcommand(__lowerCAmelCase ) DummyDataCommand.register_subcommand(__lowerCAmelCase ) # Parse args lowerCamelCase__ , lowerCamelCase__ = parser.parse_known_args() if not hasattr(__lowerCAmelCase , """func""" ): parser.print_help() exit(1 ) lowerCamelCase__ = parse_unknown_args(__lowerCAmelCase ) # Run lowerCamelCase__ = args.func(__lowerCAmelCase , **__lowerCAmelCase ) service.run() if __name__ == "__main__": main()
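The flag-pairing trick in the arg-parsing helper above, shown directly: leftover CLI args arrive as an alternating flag/value list, and zipping the even positions against the odd ones pairs them up:

unknown = ["--num_proc", "4", "--name", "demo"]
parsed = {k.lstrip("-"): v for k, v in zip(unknown[::2], unknown[1::2])}
assert parsed == {"num_proc": "4", "name": "demo"}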
9
'''simple docstring''' from manim import * class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 ) lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""CPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(1 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""GPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase ) gpu.set_x(gpu.get_x() - 1 ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""Model""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.play( Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,) lowerCamelCase__ = MarkupText( F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,) lowerCamelCase__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase__ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] for i, rect in enumerate(_lowerCAmelCase ): lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 ) cpu_target.move_to(_lowerCAmelCase ) cpu_target.generate_target() lowerCamelCase__ = 0.46 / 4 lowerCamelCase__ = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 ) cpu_targs.append(_lowerCAmelCase ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) ) second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) ) self.play(*_lowerCAmelCase ) self.play(*_lowerCAmelCase ) self.wait()
9
1
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class UpperCamelCase__ :
    '''simple docstring'''

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        raise NotImplementedError()

    def UpperCamelCase_ ( self ):
        raise NotImplementedError()


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = False ,**_lowerCAmelCase ):
        lowerCamelCase__ = tokenizer
        lowerCamelCase__ = skip_prompt
        lowerCamelCase__ = decode_kwargs

        # variables used in the streaming process
        lowerCamelCase__ = []
        lowerCamelCase__ = 0
        lowerCamelCase__ = True

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("""TextStreamer only supports batch size 1""" )
        elif len(value.shape ) > 1:
            lowerCamelCase__ = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            lowerCamelCase__ = False
            return

        # Add the new token to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist() )
        lowerCamelCase__ = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs )

        # After the symbol for a new line, we flush the cache.
        if text.endswith("""\n""" ):
            lowerCamelCase__ = text[self.print_len :]
            lowerCamelCase__ = []
            lowerCamelCase__ = 0
        # If the last token is a CJK character, we print the characters.
        elif len(_lowerCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            lowerCamelCase__ = text[self.print_len :]
            self.print_len += len(_lowerCAmelCase )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            lowerCamelCase__ = text[self.print_len : text.rfind(""" """ ) + 1]
            self.print_len += len(_lowerCAmelCase )

        self.on_finalized_text(_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            lowerCamelCase__ = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs )
            lowerCamelCase__ = text[self.print_len :]
            lowerCamelCase__ = []
            lowerCamelCase__ = 0
        else:
            lowerCamelCase__ = """"""

        lowerCamelCase__ = True
        self.on_finalized_text(_lowerCAmelCase ,stream_end=_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ):
        print(_lowerCAmelCase ,flush=_lowerCAmelCase ,end="""""" if not stream_end else None )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
        if (
            (cp >= 0x4e_00 and cp <= 0x9f_ff)
            or (cp >= 0x34_00 and cp <= 0x4d_bf)  #
            or (cp >= 0x2_00_00 and cp <= 0x2_a6_df)  #
            or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f)  #
            or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f)  #
            or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af)  #
            or (cp >= 0xf9_00 and cp <= 0xfa_ff)
            or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f)  #
        ):  #
            return True

        return False


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,**_lowerCAmelCase ):
        super().__init__(_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase )
        lowerCamelCase__ = Queue()
        lowerCamelCase__ = None
        lowerCamelCase__ = timeout

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ):
        self.text_queue.put(_lowerCAmelCase ,timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal ,timeout=self.timeout )

    def __iter__( self ):
        return self

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
9
'''simple docstring'''
UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def A__ ( __lowerCAmelCase : int ):
    lowerCamelCase__ = 0
    while number:
        # Increase speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared


# There are 2 chains:
# one ends with 89, and its chain member 58 is the one which, when declared first,
# needs the least number of iterations for all the members to be checked;
# the other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed the dictionary to an array to speed up the solution.
UpperCamelCase : list[bool | None] = [None] * 10_00_00_00
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = False


def A__ ( __lowerCAmelCase : int ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) )
    lowerCamelCase__ = number_chain
    while number < 1000_0000:
        lowerCamelCase__ = number_chain
        number *= 10

    return number_chain


def A__ ( __lowerCAmelCase : int = 1000_0000 ):
    for i in range(1 , __lowerCAmelCase ):
        if CHAINS[i] is None:
            chain(i + 1 )

    return CHAINS[:number].count(__lowerCAmelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{solution() = }')
9
1
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL


UpperCamelCase : Any = logging.get_logger(__name__)


def A__ ( __lowerCAmelCase : int ):
    if isinstance(__lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos

    elif isinstance(__lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]

    elif is_valid_image(__lowerCAmelCase ):
        return [[videos]]

    raise ValueError(F'''Could not make batched video from {videos}''' )


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = ['pixel_values']

    def __init__( self ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = PILImageResampling.BILINEAR ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = True ,_lowerCAmelCase = 1 / 2_55 ,_lowerCAmelCase = True ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
        super().__init__(**_lowerCAmelCase )
        lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 2_56}
        lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
        lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,param_name="""crop_size""" )

        lowerCamelCase__ = do_resize
        lowerCamelCase__ = size
        lowerCamelCase__ = do_center_crop
        lowerCamelCase__ = crop_size
        lowerCamelCase__ = resample
        lowerCamelCase__ = do_rescale
        lowerCamelCase__ = rescale_factor
        lowerCamelCase__ = offset
        lowerCamelCase__ = do_normalize
        lowerCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = PILImageResampling.BILINEAR ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
        lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
        if "shortest_edge" in size:
            lowerCamelCase__ = get_resize_output_image_size(_lowerCAmelCase ,size["""shortest_edge"""] ,default_to_square=_lowerCAmelCase )
        elif "height" in size and "width" in size:
            lowerCamelCase__ = (size["""height"""], size["""width"""])
        else:
            raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
        lowerCamelCase__ = get_size_dict(_lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(_lowerCAmelCase ,size=(size["""height"""], size["""width"""]) ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
        lowerCamelCase__ = image.astype(np.floataa )
        if offset:
            lowerCamelCase__ = image - (scale / 2)
        return rescale(_lowerCAmelCase ,scale=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
        return normalize(_lowerCAmelCase ,mean=_lowerCAmelCase ,std=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = ChannelDimension.FIRST ,):
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )

        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

        if offset and not do_rescale:
            raise ValueError("""For offset, do_rescale must also be set to True.""" )

        # All transformations expect numpy arrays.
        lowerCamelCase__ = to_numpy_array(_lowerCAmelCase )

        if do_resize:
            lowerCamelCase__ = self.resize(image=_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase )

        if do_center_crop:
            lowerCamelCase__ = self.center_crop(_lowerCAmelCase ,size=_lowerCAmelCase )

        if do_rescale:
            lowerCamelCase__ = self.rescale(image=_lowerCAmelCase ,scale=_lowerCAmelCase ,offset=_lowerCAmelCase )

        if do_normalize:
            lowerCamelCase__ = self.normalize(image=_lowerCAmelCase ,mean=_lowerCAmelCase ,std=_lowerCAmelCase )

        lowerCamelCase__ = to_channel_dimension_format(_lowerCAmelCase ,_lowerCAmelCase )
        return image

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = ChannelDimension.FIRST ,**_lowerCAmelCase ,):
        lowerCamelCase__ = do_resize if do_resize is not None else self.do_resize
        lowerCamelCase__ = resample if resample is not None else self.resample
        lowerCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowerCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
        lowerCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCamelCase__ = offset if offset is not None else self.offset
        lowerCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
        lowerCamelCase__ = image_mean if image_mean is not None else self.image_mean
        lowerCamelCase__ = image_std if image_std is not None else self.image_std

        lowerCamelCase__ = size if size is not None else self.size
        lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
        lowerCamelCase__ = crop_size if crop_size is not None else self.crop_size
        lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,param_name="""crop_size""" )

        if not valid_images(_lowerCAmelCase ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        lowerCamelCase__ = make_batched(_lowerCAmelCase )

        lowerCamelCase__ = [
            [
                self._preprocess_image(
                    image=_lowerCAmelCase ,do_resize=_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ,do_center_crop=_lowerCAmelCase ,crop_size=_lowerCAmelCase ,do_rescale=_lowerCAmelCase ,rescale_factor=_lowerCAmelCase ,offset=_lowerCAmelCase ,do_normalize=_lowerCAmelCase ,image_mean=_lowerCAmelCase ,image_std=_lowerCAmelCase ,data_format=_lowerCAmelCase ,)
                for img in video
            ]
            for video in videos
        ]

        lowerCamelCase__ = {"""pixel_values""": videos}
        return BatchFeature(data=_lowerCAmelCase ,tensor_type=_lowerCAmelCase )
9
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase : Tuple = logging.get_logger(__name__)

UpperCamelCase : List[str] = {
    'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = 'donut-swin'

    _UpperCamelCase = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[3, 6, 12, 24] ,_lowerCAmelCase=7 ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,**_lowerCAmelCase ,):
        super().__init__(**_lowerCAmelCase )

        lowerCamelCase__ = image_size
        lowerCamelCase__ = patch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = embed_dim
        lowerCamelCase__ = depths
        lowerCamelCase__ = len(_lowerCAmelCase )
        lowerCamelCase__ = num_heads
        lowerCamelCase__ = window_size
        lowerCamelCase__ = mlp_ratio
        lowerCamelCase__ = qkv_bias
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = drop_path_rate
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = use_absolute_embeddings
        lowerCamelCase__ = layer_norm_eps
        lowerCamelCase__ = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        lowerCamelCase__ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
9
1
'''simple docstring'''
from __future__ import annotations

import math


def A__ ( __lowerCAmelCase : int ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


UpperCamelCase : str = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def A__ ( __lowerCAmelCase : int ):
    if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )

    lowerCamelCase__ = []
    for num in range(len(__lowerCAmelCase ) ):
        lowerCamelCase__ = 0
        while 2 * i * i <= odd_composites[num]:
            lowerCamelCase__ = odd_composites[num] - 2 * i * i
            if is_prime(__lowerCAmelCase ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
            if len(__lowerCAmelCase ) == n:
                return list_nums

    return []


def A__ ( ):
    return compute_nums(1 )[0]


if __name__ == "__main__":
    print(F'{solution() = }')
9
'''simple docstring'''
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


UpperCamelCase : Optional[Any] = ['small', 'medium', 'large']

UpperCamelCase : Dict = 'lm_head.decoder.weight'
UpperCamelCase : int = 'lm_head.weight'


def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
    lowerCamelCase__ = torch.load(__lowerCAmelCase )
    lowerCamelCase__ = d.pop(__lowerCAmelCase )
    os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
    torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )


if __name__ == "__main__":
    UpperCamelCase : Any = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    UpperCamelCase : Dict = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
        UpperCamelCase : str = F'./DialoGPT-{MODEL}'
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
9
1
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


UpperCamelCase : Tuple = logging.get_logger(__name__)


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = ['input_features']

    def __init__( self ,_lowerCAmelCase=80 ,_lowerCAmelCase=1_60_00 ,_lowerCAmelCase=1_60 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,):
        super().__init__(
            feature_size=_lowerCAmelCase ,sampling_rate=_lowerCAmelCase ,padding_value=_lowerCAmelCase ,return_attention_mask=_lowerCAmelCase ,**_lowerCAmelCase ,)
        lowerCamelCase__ = n_fft
        lowerCamelCase__ = hop_length
        lowerCamelCase__ = chunk_length
        lowerCamelCase__ = chunk_length * sampling_rate
        lowerCamelCase__ = self.n_samples // hop_length
        lowerCamelCase__ = sampling_rate
        lowerCamelCase__ = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=_lowerCAmelCase ,min_frequency=0.0 ,max_frequency=8000.0 ,sampling_rate=_lowerCAmelCase ,norm="""slaney""" ,mel_scale="""slaney""" ,)

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        lowerCamelCase__ = spectrogram(
            _lowerCAmelCase ,window_function(self.n_fft ,"""hann""" ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters ,log_mel="""log10""" ,)
        lowerCamelCase__ = log_spec[:, :-1]
        lowerCamelCase__ = np.maximum(_lowerCAmelCase ,log_spec.max() - 8.0 )
        lowerCamelCase__ = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def UpperCamelCase_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = 0.0 ):
        if attention_mask is not None:
            lowerCamelCase__ = np.array(_lowerCAmelCase ,np.intaa )
            lowerCamelCase__ = []

            for vector, length in zip(_lowerCAmelCase ,attention_mask.sum(-1 ) ):
                lowerCamelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    lowerCamelCase__ = padding_value

                normed_input_values.append(_lowerCAmelCase )
        else:
            lowerCamelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]

        return normed_input_values

    def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = "max_length" ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        lowerCamelCase__ = isinstance(_lowerCAmelCase ,np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        lowerCamelCase__ = is_batched_numpy or (
            isinstance(_lowerCAmelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
        )

        if is_batched:
            lowerCamelCase__ = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(_lowerCAmelCase ,np.ndarray ):
            lowerCamelCase__ = np.asarray(_lowerCAmelCase ,dtype=np.floataa )
        elif isinstance(_lowerCAmelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCamelCase__ = raw_speech.astype(np.floataa )

        # always return batch
        if not is_batched:
            lowerCamelCase__ = [np.asarray([raw_speech] ).T]

        lowerCamelCase__ = BatchFeature({"""input_features""": raw_speech} )

        # convert into correct format for padding
        lowerCamelCase__ = self.pad(
            _lowerCAmelCase ,padding=_lowerCAmelCase ,max_length=max_length if max_length else self.n_samples ,truncation=_lowerCAmelCase ,pad_to_multiple_of=_lowerCAmelCase ,return_attention_mask=return_attention_mask or do_normalize ,)

        # zero-mean and unit-variance normalization
        if do_normalize:
            lowerCamelCase__ = self.zero_mean_unit_var_norm(
                padded_inputs["""input_features"""] ,attention_mask=padded_inputs["""attention_mask"""] ,padding_value=self.padding_value ,)
            lowerCamelCase__ = np.stack(padded_inputs["""input_features"""] ,axis=0 )

        # make sure list is in array format
        lowerCamelCase__ = padded_inputs.get("""input_features""" ).transpose(2 ,0 ,1 )

        lowerCamelCase__ = [self._np_extract_fbank_features(_lowerCAmelCase ) for waveform in input_features[0]]

        if isinstance(input_features[0] ,_lowerCAmelCase ):
            lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.floataa ) for feature in input_features]
        else:
            lowerCamelCase__ = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            lowerCamelCase__ = padded_inputs["""attention_mask"""][:, :: self.hop_length]

        if return_tensors is not None:
            lowerCamelCase__ = padded_inputs.convert_to_tensors(_lowerCAmelCase )

        return padded_inputs

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = copy.deepcopy(self.__dict__ )
        lowerCamelCase__ = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
'''simple docstring'''
from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class UpperCamelCase__ :
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,):
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = image_size
        lowerCamelCase__ = patch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = is_training
        lowerCamelCase__ = use_labels
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = type_sequence_label_size
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = mask_ratio
        lowerCamelCase__ = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        lowerCamelCase__ = (image_size // patch_size) ** 2
        lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )

        lowerCamelCase__ = self.get_config()

        return config, pixel_values, labels

    def UpperCamelCase_ ( self ):
        return ViTMAEConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase )
        lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
        lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
        # expected sequence length = num_patches
        lowerCamelCase__ = (self.image_size // self.patch_size) ** 2
        lowerCamelCase__ = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )

        # test greyscale images
        lowerCamelCase__ = 1
        lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase )
        lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase )
        lowerCamelCase__ = self.patch_size**2
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.prepare_config_and_inputs()
        ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs
        lowerCamelCase__ = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_tf
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
    '''simple docstring'''

    _UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    _UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}

    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = TFViTMAEModelTester(self )
        lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 )

    def UpperCamelCase_ ( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def UpperCamelCase_ ( self ):
        pass

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(_lowerCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
            lowerCamelCase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(_lowerCAmelCase )
            lowerCamelCase__ = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ = [*signature.parameters.keys()]

            lowerCamelCase__ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        # make the mask reproducible
        np.random.seed(2 )

        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
        lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )

        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(_lowerCAmelCase )
            lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
            lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )

            lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) )
            lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )

            lowerCamelCase__ = outputs_dict[0].numpy()
            lowerCamelCase__ = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 )

    def UpperCamelCase_ ( self ):
        # make the mask reproducible
        np.random.seed(2 )

        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
        lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )

        def prepare_numpy_arrays(_lowerCAmelCase ):
            lowerCamelCase__ = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(_lowerCAmelCase ):
                    lowerCamelCase__ = v.numpy()
                else:
                    lowerCamelCase__ = np.array(_lowerCAmelCase )

            return inputs_np_dict

        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(_lowerCAmelCase )
            lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
            lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase )

            lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )
            lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )

            self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        # make masks reproducible
        np.random.seed(2 )

        lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        lowerCamelCase__ = tf.constant(_lowerCAmelCase )

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        lowerCamelCase__ = tf_noise

        super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        # make mask reproducible
        np.random.seed(2 )

        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(_lowerCAmelCase )
            if module_member_name.endswith("""MainLayer""" )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
            for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),)
            if isinstance(_lowerCAmelCase ,_lowerCAmelCase )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase )
        }
        lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
        lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase )
        inputs_dict.update({"""noise""": noise} )

        for main_layer_class in tf_main_layer_classes:
            lowerCamelCase__ = main_layer_class(_lowerCAmelCase )

            lowerCamelCase__ = {
                name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) )
            lowerCamelCase__ = model(_lowerCAmelCase )

            with tempfile.TemporaryDirectory() as tmpdirname:
                lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" )
                model.save(_lowerCAmelCase )
                lowerCamelCase__ = tf.keras.models.load_model(
                    _lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(_lowerCAmelCase ,tf.keras.Model )
                lowerCamelCase__ = model(_lowerCAmelCase )
                self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )

    @slow
    def UpperCamelCase_ ( self ):
        # make mask reproducible
        np.random.seed(2 )

        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
        lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )

        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(_lowerCAmelCase )
            lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
            lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )

            if model_class.__name__ == "TFViTMAEModel":
                lowerCamelCase__ = outputs.last_hidden_state.numpy()
                lowerCamelCase__ = 0
            else:
                lowerCamelCase__ = outputs.logits.numpy()
                lowerCamelCase__ = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase )
                lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase )
                lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )

                if model_class.__name__ == "TFViTMAEModel":
                    lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy()
                    lowerCamelCase__ = 0
                else:
                    lowerCamelCase__ = after_outputs["""logits"""].numpy()
                    lowerCamelCase__ = 0

                lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(_lowerCAmelCase ,1E-5 )

    def UpperCamelCase_ ( self ):
        # make mask reproducible
        np.random.seed(2 )

        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
        lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )

        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(_lowerCAmelCase )
            lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
            lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase )

            lowerCamelCase__ = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(_lowerCAmelCase )
            lowerCamelCase__ = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            lowerCamelCase__ = model_class.from_config(model.config )
            lowerCamelCase__ = new_model(_lowerCAmelCase )  # Build model
            new_model.set_weights(model.get_weights() )
            lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase )

            self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase )

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" )
    def UpperCamelCase_ ( self ):
        pass

    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def UpperCamelCase_ ( self ):
        pass

    @slow
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
        self.assertIsNotNone(_lowerCAmelCase )


def A__ ( ):
    lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_tf
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def UpperCamelCase_ ( self ):
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None

    @slow
    def UpperCamelCase_ ( self ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )

        lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
        lowerCamelCase__ = self.default_image_processor
        lowerCamelCase__ = prepare_img()
        lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" )

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        lowerCamelCase__ = ViTMAEConfig()
        lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        lowerCamelCase__ = np.random.uniform(size=(1, num_patches) )

        # forward pass
        lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase )

        # verify the logits
        lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] )
        self.assertEqual(outputs.logits.shape ,_lowerCAmelCase )

        lowerCamelCase__ = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )

        tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
9
1
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz


if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    UpperCamelCase : int = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    UpperCamelCase : Dict = [0, 25, 50]
    UpperCamelCase : Optional[int] = [25, 50, 75]
    UpperCamelCase : List[Any] = fuzz.membership.trimf(X, abca)
    UpperCamelCase : Union[str, Any] = fuzz.membership.trimf(X, abca)

    # Compute the different operations using inbuilt functions.
    UpperCamelCase : int = np.ones(75)
    UpperCamelCase : str = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    UpperCamelCase : Any = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    UpperCamelCase : Any = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    UpperCamelCase : Optional[int] = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    UpperCamelCase : Any = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    UpperCamelCase : Union[str, Any] = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    UpperCamelCase : List[str] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    UpperCamelCase : str = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
9
'''simple docstring'''
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,):
        lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18}
        lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}

        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = image_size
        lowerCamelCase__ = min_resolution
        lowerCamelCase__ = max_resolution
        lowerCamelCase__ = do_resize
        lowerCamelCase__ = size
        lowerCamelCase__ = do_center_crop
        lowerCamelCase__ = crop_size
        lowerCamelCase__ = do_normalize
        lowerCamelCase__ = image_mean
        lowerCamelCase__ = image_std

    def UpperCamelCase_ ( self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class UpperCamelCase__ (a ,unittest.TestCase ):
    '''simple docstring'''

    _UpperCamelCase = LevitImageProcessor if is_vision_available() else None

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = LevitImageProcessingTester(self )

    @property
    def UpperCamelCase_ ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )

        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )

    def UpperCamelCase_ ( self ):
        pass

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,Image.Image )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,np.ndarray )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,torch.Tensor )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
9
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


UpperCamelCase : Any = {
    'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
    'tokenization_luke': ['LukeTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Optional[Any] = [
        'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LukeForEntityClassification',
        'LukeForEntityPairClassification',
        'LukeForEntitySpanClassification',
        'LukeForMultipleChoice',
        'LukeForQuestionAnswering',
        'LukeForSequenceClassification',
        'LukeForTokenClassification',
        'LukeForMaskedLM',
        'LukeModel',
        'LukePreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
'''simple docstring'''
import numpy

# List of input, output pairs
UpperCamelCase : List[Any] = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
UpperCamelCase : int = [2, 4, 1, 5]
UpperCamelCase : int = len(train_data)
UpperCamelCase : Dict = 0.009


def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ):
    return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output(
        __lowerCAmelCase , __lowerCAmelCase )


def A__ ( __lowerCAmelCase : Any ):
    lowerCamelCase__ = 0
    for i in range(len(__lowerCAmelCase ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None


def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ):
    lowerCamelCase__ = 0
    for i in range(__lowerCAmelCase ):
        if index == -1:
            summation_value += _error(__lowerCAmelCase )
        else:
            summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index]
    return summation_value


def A__ ( __lowerCAmelCase : List[Any] ):
    lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m
    return cost_derivative_value


def A__ ( ):
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    lowerCamelCase__ = 0.00_0002
    lowerCamelCase__ = 0
    lowerCamelCase__ = 0
    while True:
        j += 1
        lowerCamelCase__ = [0, 0, 0, 0]
        for i in range(0 , len(__lowerCAmelCase ) ):
            lowerCamelCase__ = get_cost_derivative(i - 1 )
            lowerCamelCase__ = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            __lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ):
            break
        lowerCamelCase__ = temp_parameter_vector
    print(("""Number of iterations:""", j) )


def A__ ( ):
    for i in range(len(__lowerCAmelCase ) ):
        print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) )
        print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) )


if __name__ == "__main__":
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
9
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() # fmt: off lowerCamelCase__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on lowerCamelCase__ = dict(zip(_lowerCAmelCase ,range(len(_lowerCAmelCase ) ) ) ) lowerCamelCase__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] lowerCamelCase__ = {"""unk_token""": """<unk>"""} lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_lowerCAmelCase ) ) lowerCamelCase__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073], """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowerCamelCase__ = os.path.join(self.tmpdirname ,_lowerCAmelCase ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return ViTImageProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = self.get_rust_tokenizer() lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = CLIPSegProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowerCamelCase__ = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=_lowerCAmelCase ) lowerCamelCase__ = CLIPSegProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowerCamelCase__ = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() 
,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,_lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,_lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPSegProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPSegProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPSegProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPSegProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(images=_lowerCAmelCase ,visual_prompt=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """conditional_pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): 
lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPSegProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )
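# --- Illustrative usage sketch (not part of the test file above) ---
# A minimal, hedged example of the CLIPSegProcessor behavior the tests exercise. The
# checkpoint name "CIDAS/clipseg-rd64-refined" is an assumption for illustration; the
# tests above only use a locally built tokenizer and image processor.
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
# Text + image path: yields input_ids, attention_mask and pixel_values.
inputs = processor(text="lower newer", images=image, return_tensors="np")
# Visual-prompt path: yields pixel_values and conditional_pixel_values instead.
prompt_inputs = processor(images=image, visual_prompt=image, return_tensors="np")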
9
'''simple docstring''' import argparse from omegaconf import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase ) lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""] lowerCamelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCamelCase__ = {} lowerCamelCase__ = """first_stage_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCamelCase__ = {} lowerCamelCase__ = """model.diffusion_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] lowerCamelCase__ = config.model.params.first_stage_config.params lowerCamelCase__ = config.model.params.unet_config.params lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval() vqvae.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval() unet.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = DDIMScheduler( num_train_timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , ) lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) pipeline.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) UpperCamelCase : List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
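# Example invocation of the converter above (script name and all paths are placeholders):
#   python conversion_ldm_uncond.py --checkpoint_path ldm.ckpt --config_path ldm.yaml --output_path ./ldm-pipeline
# The resulting directory can then be loaded with LDMPipeline.from_pretrained("./ldm-pipeline").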
9
1
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = XLNetTokenizer _UpperCamelCase = XLNetTokenizerFast _UpperCamelCase = True _UpperCamelCase = True def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = XLNetTokenizer(_lowerCAmelCase ,keep_accents=_lowerCAmelCase ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """<s>""" lowerCamelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""<eod>""" ) self.assertEqual(len(_lowerCAmelCase ) ,10_06 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,10_00 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = XLNetTokenizer(_lowerCAmelCase ,keep_accents=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[2_85, 46, 10, 1_70, 3_82] ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCAmelCase ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) def UpperCamelCase_ ( self ): lowerCamelCase__ = XLNetTokenizer(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCAmelCase ,[ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE 
+ """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] ,) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = XLNetTokenizer(_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCAmelCase ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] ,) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) lowerCamelCase__ = tokenizer.encode("""sequence builders""" ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ,_lowerCAmelCase ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def UpperCamelCase_ ( self ): # fmt: off lowerCamelCase__ = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
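# --- Illustrative usage sketch (downloads the checkpoint, like the @slow test above) ---
# A hedged restatement of the sentence-pair layout the test asserts: XLNet appends
# <sep> (id 4) after each segment and a trailing <cls> (id 3).
from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
text = tok.encode("sequence builders", add_special_tokens=False)
text_a = tok.encode("multi-sequence build", add_special_tokens=False)
assert tok.build_inputs_with_special_tokens(text) == text + [4, 3]
assert tok.build_inputs_with_special_tokens(text, text_a) == text + [4] + text_a + [4, 3]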
9
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ): lowerCamelCase__ = "" lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ): lowerCamelCase__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__lowerCAmelCase ) return decoded def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = [] for key in product(__lowerCAmelCase , repeat=3 ): lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase ) if encoded is not None: possibles.append(__lowerCAmelCase ) return possibles def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ): return [possible for possible in possibles if common_word in possible.lower()] def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" ) lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )] lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase ) for common_word in COMMON_WORDS: lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) == 1: break lowerCamelCase__ = possibles[0] return sum(ord(__lowerCAmelCase ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
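# --- Standalone sketch of the XOR step the search above brute-forces ---
# Toy data only: encrypt "the cat" with the repeating three-letter key "abc", then
# decrypt it again. All names here are illustrative; the Project Euler input file from
# the solution above is not touched.
from itertools import cycle

cipher = [ord(c) ^ ord(k) for c, k in zip("the cat", cycle("abc"))]
plain = "".join(chr(c ^ ord(k)) for c, k in zip(cipher, cycle("abc")))
assert plain == "the cat"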
9
1
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=True ): model.train() lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = F.mse_loss(__lowerCAmelCase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any=False ): set_seed(42 ) lowerCamelCase__ = RegressionModel() lowerCamelCase__ = deepcopy(__lowerCAmelCase ) lowerCamelCase__ = RegressionDataset(length=80 ) lowerCamelCase__ = DataLoader(__lowerCAmelCase , batch_size=16 ) model.to(accelerator.device ) if sched: lowerCamelCase__ = AdamW(params=model.parameters() , lr=1e-3 ) lowerCamelCase__ = AdamW(params=ddp_model.parameters() , lr=1e-3 ) lowerCamelCase__ = LambdaLR(__lowerCAmelCase , lr_lambda=lambda __lowerCAmelCase : epoch**0.65 ) lowerCamelCase__ = LambdaLR(__lowerCAmelCase , lr_lambda=lambda __lowerCAmelCase : epoch**0.65 ) # Make a copy of `model` if sched: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def A__ ( __lowerCAmelCase : Any ): # Test when on a single CPU or GPU that the context manager does nothing lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_training_setup(__lowerCAmelCase ) # Use a single batch lowerCamelCase__ , lowerCamelCase__ = next(iter(__lowerCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) ) lowerCamelCase__ , lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__lowerCAmelCase ): step_model(__lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: # Sync grads step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) lowerCamelCase__ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )] def A__ ( __lowerCAmelCase : Tuple ): # Test on distributed setup that context manager behaves properly lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_training_setup(__lowerCAmelCase ) # Use a single batch lowerCamelCase__ , lowerCamelCase__ = next(iter(__lowerCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) ) lowerCamelCase__ , lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__lowerCAmelCase ): step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: # Sync grads step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) lowerCamelCase__ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )] def A__ ( __lowerCAmelCase : int=False , __lowerCAmelCase : Optional[int]=False ): lowerCamelCase__ = Accelerator( split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_training_setup(__lowerCAmelCase ) for iteration, batch in enumerate(__lowerCAmelCase ): lowerCamelCase__ , lowerCamelCase__ = batch.values() # Gather the distributed inputs and targs for the base model lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) ) lowerCamelCase__ , lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(__lowerCAmelCase ): step_model(__lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCAmelCase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) lowerCamelCase__ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )] GradientState._reset_state() def A__ ( __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=False ): lowerCamelCase__ = Accelerator( split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_training_setup(__lowerCAmelCase , __lowerCAmelCase ) for iteration, batch in enumerate(__lowerCAmelCase ): lowerCamelCase__ , lowerCamelCase__ = batch.values() # Gather the distributed inputs and targs for the base model lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) ) lowerCamelCase__ , lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCAmelCase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(__lowerCAmelCase ): step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n''' lowerCamelCase__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCAmelCase )) if accelerator.num_processes > 1: check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def A__ ( ): lowerCamelCase__ = Accelerator() lowerCamelCase__ = RegressionDataset(length=80 ) lowerCamelCase__ = DataLoader(__lowerCAmelCase , batch_size=16 ) lowerCamelCase__ = RegressionDataset(length=96 ) lowerCamelCase__ = DataLoader(__lowerCAmelCase , batch_size=16 ) lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(__lowerCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase ) if iteration < 
len(__lowerCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(__lowerCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase ) if batch_num < len(__lowerCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def A__ ( ): lowerCamelCase__ = Accelerator() lowerCamelCase__ = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(__lowerCAmelCase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(__lowerCAmelCase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(__lowerCAmelCase , __lowerCAmelCase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
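# --- Minimal sketch of the `Accelerator.accumulate` pattern verified above ---
# Assumptions: the RegressionModel/RegressionDataset helpers from accelerate.test_utils
# (already imported by the script) expose batches as {"x": ..., "y": ...}, matching the
# `batch.values()` unpacking used in the tests.
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator(gradient_accumulation_steps=2)
model = RegressionModel()
optimizer = AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):  # suppresses grad sync except on step boundaries
        loss = F.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()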
9
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = data # Initialize hash values lowerCamelCase__ = [ 0x6a_09_e6_67, 0xbb_67_ae_85, 0x3c_6e_f3_72, 0xa5_4f_f5_3a, 0x51_0e_52_7f, 0x9b_05_68_8c, 0x1f_83_d9_ab, 0x5b_e0_cd_19, ] # Initialize round constants lowerCamelCase__ = [ 0x42_8a_2f_98, 0x71_37_44_91, 0xb5_c0_fb_cf, 0xe9_b5_db_a5, 0x39_56_c2_5b, 0x59_f1_11_f1, 0x92_3f_82_a4, 0xab_1c_5e_d5, 0xd8_07_aa_98, 0x12_83_5b_01, 0x24_31_85_be, 0x55_0c_7d_c3, 0x72_be_5d_74, 0x80_de_b1_fe, 0x9b_dc_06_a7, 0xc1_9b_f1_74, 0xe4_9b_69_c1, 0xef_be_47_86, 0x0f_c1_9d_c6, 0x24_0c_a1_cc, 0x2d_e9_2c_6f, 0x4a_74_84_aa, 0x5c_b0_a9_dc, 0x76_f9_88_da, 0x98_3e_51_52, 0xa8_31_c6_6d, 0xb0_03_27_c8, 0xbf_59_7f_c7, 0xc6_e0_0b_f3, 0xd5_a7_91_47, 0x06_ca_63_51, 0x14_29_29_67, 0x27_b7_0a_85, 0x2e_1b_21_38, 0x4d_2c_6d_fc, 0x53_38_0d_13, 0x65_0a_73_54, 0x76_6a_0a_bb, 0x81_c2_c9_2e, 0x92_72_2c_85, 0xa2_bf_e8_a1, 0xa8_1a_66_4b, 0xc2_4b_8b_70, 0xc7_6c_51_a3, 0xd1_92_e8_19, 0xd6_99_06_24, 0xf4_0e_35_85, 0x10_6a_a0_70, 0x19_a4_c1_16, 0x1e_37_6c_08, 0x27_48_77_4c, 0x34_b0_bc_b5, 0x39_1c_0c_b3, 0x4e_d8_aa_4a, 0x5b_9c_ca_4f, 0x68_2e_6f_f3, 0x74_8f_82_ee, 0x78_a5_63_6f, 0x84_c8_78_14, 0x8c_c7_02_08, 0x90_be_ff_fa, 0xa4_50_6c_eb, 0xbe_f9_a3_f7, 0xc6_71_78_f2, ] lowerCamelCase__ = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64)) lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) ) return data + padding + big_endian_integer def UpperCamelCase_ ( self ): # Convert into blocks of 64 bytes lowerCamelCase__ = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array lowerCamelCase__ = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) lowerCamelCase__ = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) lowerCamelCase__ = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 ) lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g) lowerCamelCase__ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 ) lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c) lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) lowerCamelCase__ = [a, b, c, d, e, f, g, h] # Modify final values lowerCamelCase__ = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes ) ] lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): import hashlib lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() ) def A__ ( ): import doctest doctest.testmod() lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" ) print(SHAaaa(__lowerCAmelCase ).hash ) if __name__ == "__main__": main()
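# --- Quick self-check mirroring the unit test above ---
# Hedged sketch: the class is declared under the dataset's obfuscated placeholder name
# but is referenced as `SHAaaa` throughout this file, so that name is assumed here;
# `hashlib.sha256` is the real stdlib call being matched.
import hashlib

payload = b"Test String"
assert SHAaaa(payload).hash == hashlib.sha256(payload).hexdigest()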
9
1
'''simple docstring''' from collections import defaultdict def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = 1 lowerCamelCase__ = True for v in tree[start]: if v not in visited: ret += dfs(__lowerCAmelCase ) if ret % 2 == 0: cuts.append(__lowerCAmelCase ) return ret def A__ ( ): dfs(1 ) if __name__ == "__main__": UpperCamelCase , UpperCamelCase : Tuple = 10, 9 UpperCamelCase : Optional[int] = defaultdict(list) UpperCamelCase : dict[int, bool] = {} UpperCamelCase : list[int] = [] UpperCamelCase : Optional[Any] = 0 UpperCamelCase : Dict = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
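# Worked check for the sample tree wired up above (HackerRank's "Even Tree"): the
# program prints 2, since cutting edges (1, 3) and (1, 6) leaves the even-sized
# components {3, 4}, {6, 8, 9, 10} and {1, 2, 5, 7}. `len(cuts) - 1` discounts the
# root, whose whole-tree "cut" is trivial.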
9
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) lowerCamelCase__ = emb.weight.data return lin_layer def A__ ( __lowerCAmelCase : Dict ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] lowerCamelCase__ = mam_aaa["""model"""] remove_ignore_keys_(__lowerCAmelCase ) lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0] lowerCamelCase__ = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""] lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) lowerCamelCase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase : Tuple = parser.parse_args() UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
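# Example invocation of the converter above (script name and paths are placeholders):
#   python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/m2m100.pt ./m2m100-hf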
9
1
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer UpperCamelCase : List[Any] = ['bert-base-uncased', 'bert-base-cased'] UpperCamelCase : List[str] = 'hf-internal-testing/tiny-bert-tf-only' if is_tf_available(): class UpperCamelCase__ (tf.keras.Model ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ): super().__init__() lowerCamelCase__ = tokenizer lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TFAutoModel.from_config(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = self.tokenizer(_lowerCAmelCase ) lowerCamelCase__ = self.bert(**_lowerCAmelCase ) return out["pooler_output"] @require_tf @require_tensorflow_text class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): super().setUp() lowerCamelCase__ = [ BertTokenizer.from_pretrained(_lowerCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false lowerCamelCase__ = [TFBertTokenizer.from_pretrained(_lowerCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(_lowerCAmelCase ,use_fast_bert_tokenizer=_lowerCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) lowerCamelCase__ = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] lowerCamelCase__ = list(zip(self.test_sentences ,self.test_sentences[::-1] ) ) def UpperCamelCase_ ( self ): for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_tensors="""tf""" ,padding="""longest""" ) lowerCamelCase__ = tf_tokenizer(_lowerCAmelCase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] ,tf.intaa ) == tf_outputs[key] ) ) @slow def UpperCamelCase_ ( self ): for tf_tokenizer in self.tf_tokenizers: lowerCamelCase__ = tf_tokenizer(self.paired_sentences ) lowerCamelCase__ = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] ,text_pair=[sentence[1] for sentence in self.paired_sentences] ,) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] ,tf.intaa ) == separated_outputs[key] ) ) @slow def UpperCamelCase_ ( self ): for tf_tokenizer in self.tf_tokenizers: lowerCamelCase__ = tf.function(_lowerCAmelCase ) for test_inputs in (self.test_sentences, self.paired_sentences): lowerCamelCase__ = tf.constant(_lowerCAmelCase ) lowerCamelCase__ = compiled_tokenizer(_lowerCAmelCase ) lowerCamelCase__ = tf_tokenizer(_lowerCAmelCase ) for key in eager_outputs.keys(): 
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def UpperCamelCase_ ( self ): for tf_tokenizer in self.tf_tokenizers: lowerCamelCase__ = ModelToSave(tokenizer=_lowerCAmelCase ) lowerCamelCase__ = tf.convert_to_tensor(self.test_sentences ) lowerCamelCase__ = model(_lowerCAmelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowerCamelCase__ = Path(_lowerCAmelCase ) / """saved.model""" model.save(_lowerCAmelCase ) lowerCamelCase__ = tf.keras.models.load_model(_lowerCAmelCase ) lowerCamelCase__ = loaded_model(_lowerCAmelCase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) ,1E-5 )
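# --- Hedged sketch of the in-graph tokenization pattern tested above ---
# Uses the tiny TF-only test checkpoint already named in this file; requires the
# tensorflow_text package, as the test decorators indicate.
import tensorflow as tf
from transformers import TFAutoModel
from transformers.models.bert import TFBertTokenizer

tokenizer = TFBertTokenizer.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
tokens = tokenizer(tf.constant(["This is a straightforward English test sentence."]))
pooled = model(**tokens)["pooler_output"]  # tokenization runs inside the TF graph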
9
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = BlipImageProcessor() lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase 
,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
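# --- Illustrative usage sketch (checkpoint name is an assumption, not from the test) ---
# Mirrors the key set the tests above assert for a text + image call.
import numpy as np
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
assert set(inputs.keys()) == {"pixel_values", "input_ids", "attention_mask"}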
9
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCamelCase : Optional[Any] = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = ['DeiTFeatureExtractor'] UpperCamelCase : List[str] = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : int = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys UpperCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True) os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True) os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True) def A__ ( __lowerCAmelCase : Union[str, Any] ): if hor == 128: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 64, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) lowerCamelCase__ = model.state_dict() lowerCamelCase__ = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_5536, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = { """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 128, 256), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_5536, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) lowerCamelCase__ = model lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , 
"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
9
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() # fmt: off lowerCamelCase__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on lowerCamelCase__ = dict(zip(_lowerCAmelCase ,range(len(_lowerCAmelCase ) ) ) ) lowerCamelCase__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] lowerCamelCase__ = {"""unk_token""": """<unk>"""} lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_lowerCAmelCase ) ) lowerCamelCase__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073], """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowerCamelCase__ = os.path.join(self.tmpdirname ,_lowerCAmelCase ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = self.get_rust_tokenizer() lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowerCamelCase__ = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_lowerCAmelCase ) lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowerCamelCase__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) 
self.assertIsInstance(processor_slow.tokenizer ,_lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,_lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) 
lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
9
'''simple docstring''' import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,): lowerCamelCase__ = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } lowerCamelCase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowerCamelCase__ = token_dict["""token"""] lowerCamelCase__ = Tokenizer(Unigram() ) lowerCamelCase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ), normalizers.Lowercase(), ] ) lowerCamelCase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ), pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ), pre_tokenizers.Punctuation(), ] ) lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,) lowerCamelCase__ = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [files] self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ): lowerCamelCase__ = json.loads(self._tokenizer.to_str() ) lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""] lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
9
1
'''simple docstring''' import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = s.rsplit(__lowerCAmelCase , __lowerCAmelCase ) return new.join(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[Any] ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def A__ ( __lowerCAmelCase : Dict ): lowerCamelCase__ = {} lowerCamelCase__ = ["""group_1""", """group_2""", """group_3""", """group_4"""] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: lowerCamelCase__ = key.replace(F'''{group_key}.''' , F'''{group_key}.group.''' ) if "res_path" in key: lowerCamelCase__ = key.replace("""res_path.""" , """res_path.path.""" ) if key.endswith(""".w""" ): lowerCamelCase__ = rreplace(__lowerCAmelCase , """.w""" , """.weight""" , 1 ) if key.endswith(""".b""" ): lowerCamelCase__ = rreplace(__lowerCAmelCase , """.b""" , """.bias""" , 1 ) lowerCamelCase__ = value.float() return upgrade @torch.no_grad() def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Optional[Any]=True ): from dall_e import Encoder lowerCamelCase__ = Encoder() if os.path.exists(__lowerCAmelCase ): lowerCamelCase__ = torch.load(__lowerCAmelCase ) else: lowerCamelCase__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = ckpt.state_dict() encoder.load_state_dict(__lowerCAmelCase ) if config_path is not None: lowerCamelCase__ = FlavaImageCodebookConfig.from_pretrained(__lowerCAmelCase ) else: lowerCamelCase__ = FlavaImageCodebookConfig() lowerCamelCase__ = FlavaImageCodebook(__lowerCAmelCase ).eval() lowerCamelCase__ = encoder.state_dict() lowerCamelCase__ = upgrade_state_dict(__lowerCAmelCase ) hf_model.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = hf_model.state_dict() lowerCamelCase__ = count_parameters(__lowerCAmelCase ) lowerCamelCase__ = count_parameters(__lowerCAmelCase ) assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(__lowerCAmelCase ) else: return hf_state_dict if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') UpperCamelCase : List[Any] = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
9
'''simple docstring'''
from __future__ import annotations

import math


def A__ ( __lowerCAmelCase : int ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


UpperCamelCase : str = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def A__ ( __lowerCAmelCase : int ):
    if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be > 0""" )
    lowerCamelCase__ = []
    for num in range(len(__lowerCAmelCase ) ):
        lowerCamelCase__ = 0
        while 2 * i * i <= odd_composites[num]:
            lowerCamelCase__ = odd_composites[num] - 2 * i * i
            if is_prime(__lowerCAmelCase ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(__lowerCAmelCase ) == n:
            return list_nums
    return []


def A__ ( ):
    return compute_nums(1 )[0]


if __name__ == "__main__":
    print(F'{solution() = }')
9
1
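The primality helper in the row above relies on the 6k ± 1 trial-division trick (every prime above 3 is of that form). Since the corpus renaming makes it hard to read, here is a minimal sketch with descriptive names, assuming the logic is otherwise identical to the obfuscated version:

import math


def is_prime(number: int) -> bool:
    """Trial division that only tests 2, 3 and divisors of the form 6k +/- 1."""
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# Quick sanity check against the known primes below 30.
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]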
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva UpperCamelCase : Dict = '' UpperCamelCase : Any = '' UpperCamelCase : Optional[Any] = '' UpperCamelCase : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def A__ ( ): lowerCamelCase__ , lowerCamelCase__ = get_dataset(__lowerCAmelCase , __lowerCAmelCase ) print("""Processing...""" ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for index, image in enumerate(__lowerCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' lowerCamelCase__ = random_chars(32 ) lowerCamelCase__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] lowerCamelCase__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(F'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' ) lowerCamelCase__ = [] for anno in new_annos[index]: lowerCamelCase__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(__lowerCAmelCase ) with open(F'''/{file_root}.txt''' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ): lowerCamelCase__ = [] lowerCamelCase__ = [] for label_file in glob.glob(os.path.join(__lowerCAmelCase , """*.txt""" ) ): lowerCamelCase__ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(__lowerCAmelCase ) as in_file: lowerCamelCase__ = in_file.readlines() lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''{label_name}.jpg''' ) lowerCamelCase__ = [] for obj_list in obj_lists: lowerCamelCase__ = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowerCAmelCase ) labels.append(__lowerCAmelCase ) return img_paths, labels def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int = 1 ): lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] for idx in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = [] lowerCamelCase__ = img_list[idx] path_list.append(__lowerCAmelCase ) lowerCamelCase__ = anno_list[idx] lowerCamelCase__ = cva.imread(__lowerCAmelCase ) if flip_type == 1: lowerCamelCase__ = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: lowerCamelCase__ = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: lowerCamelCase__ = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: lowerCamelCase__ = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowerCAmelCase ) new_imgs_list.append(__lowerCAmelCase ) return new_imgs_list, new_annos_lists, path_list def A__ ( __lowerCAmelCase : int = 32 ): assert number_char > 1, "The number of character should greater than 1" lowerCamelCase__ = ascii_lowercase + digits return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ) if __name__ == "__main__": main() print('DONE ✅')
9
'''simple docstring'''
def A__ ( ):
    return [
        a * b * (1000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(F'{solution() = }')
9
1
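The one-liner in the context cell above is Project Euler problem 9: find the Pythagorean triplet with a + b + c = 1000 and return a·b·c. An equivalent, named version of the same search, shown only as a readable sketch:

def special_pythagorean_triplet_product(perimeter: int = 1000) -> int:
    """Return a * b * c for the triplet with a <= b < c, a^2 + b^2 = c^2
    and a + b + c == perimeter."""
    for a in range(1, perimeter):
        for b in range(a, perimeter):
            c = perimeter - a - b
            if c > b and a * a + b * b == c * c:
                return a * b * c
    raise ValueError("no triplet found")


print(special_pythagorean_triplet_product())  # 31875000, from (200, 375, 425)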
'''simple docstring'''
import pprint

import requests

UpperCamelCase : Union[str, Any] = 'https://zenquotes.io/api'


def A__ ( ):
    return requests.get(API_ENDPOINT_URL + """/today""" ).json()


def A__ ( ):
    return requests.get(API_ENDPOINT_URL + """/random""" ).json()


if __name__ == "__main__":
    UpperCamelCase : Dict = random_quotes()
    pprint.pprint(response)
9
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} UpperCamelCase : Dict = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } UpperCamelCase : List[Any] = { 'camembert-base': 5_12, } UpperCamelCase : List[str] = '▁' class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,) lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCAmelCase ) ) lowerCamelCase__ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3} lowerCamelCase__ = len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 
{self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] lowerCamelCase__ = """""" lowerCamelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowerCamelCase__ = True lowerCamelCase__ = [] else: current_sub_tokens.append(_lowerCAmelCase ) lowerCamelCase__ = False out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def __getstate__( self ): lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self ,_lowerCAmelCase ): lowerCamelCase__ = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase ,"""wb""" ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
9
1
'''simple docstring'''
def A__ ( ):
    return [
        a * b * (1000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(F'{solution() = }')
9
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = R"""\w+[.]\d+""" lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase ) for pat in pats: lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) ) return key def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) if ( any("""norm""" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": lowerCamelCase__ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ): # Step 1: Convert pytorch tensor to numpy lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) ) lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) lowerCamelCase__ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ = rename_key(__lowerCAmelCase ) lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) ) # Correctly rename weight parameters lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) return unflatten_dict(__lowerCAmelCase )
9
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None # sigma(t_i) @classmethod def UpperCamelCase_ ( cls ): return cls() @dataclass class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 42 _UpperCamelCase = 42 _UpperCamelCase = 42 class UpperCamelCase__ (a ,a ): '''simple docstring''' @property def UpperCamelCase_ ( self ): return True @register_to_config def __init__( self ,_lowerCAmelCase = 0.02 ,_lowerCAmelCase = 1_00 ,_lowerCAmelCase = 1.007 ,_lowerCAmelCase = 80 ,_lowerCAmelCase = 0.05 ,_lowerCAmelCase = 50 ,): pass def UpperCamelCase_ ( self ): return KarrasVeSchedulerState.create() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = () ): lowerCamelCase__ = jnp.arange(0 ,_lowerCAmelCase )[::-1].copy() lowerCamelCase__ = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=_lowerCAmelCase ,schedule=jnp.array(_lowerCAmelCase ,dtype=jnp.floataa ) ,timesteps=_lowerCAmelCase ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,): if self.config.s_min <= sigma <= self.config.s_max: lowerCamelCase__ = min(self.config.s_churn / state.num_inference_steps ,2**0.5 - 1 ) else: lowerCamelCase__ = 0 # sample eps ~ N(0, S_noise^2 * I) lowerCamelCase__ = random.split(_lowerCAmelCase ,num=1 ) lowerCamelCase__ = self.config.s_noise * random.normal(key=_lowerCAmelCase ,shape=sample.shape ) lowerCamelCase__ = sigma + gamma * sigma lowerCamelCase__ = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = True ,): lowerCamelCase__ = sample_hat + sigma_hat * model_output lowerCamelCase__ = (sample_hat - pred_original_sample) / sigma_hat lowerCamelCase__ = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_lowerCAmelCase ,derivative=_lowerCAmelCase ,state=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = True ,): lowerCamelCase__ = sample_prev + sigma_prev * model_output lowerCamelCase__ = (sample_prev - pred_original_sample) / sigma_prev lowerCamelCase__ = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_lowerCAmelCase ,derivative=_lowerCAmelCase ,state=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): raise NotImplementedError()
9
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] 
,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """patrickvonplaten/t5-tiny-random""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
9
1
'''simple docstring'''
UpperCamelCase : List[str] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCamelCase : str = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCamelCase : Any = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
9
'''simple docstring'''
from math import factorial

UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def A__ ( __lowerCAmelCase : int ):
    if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        raise TypeError("""Parameter number must be int""" )
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""" )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) )


def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ):
    if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        raise TypeError("""Parameters chain_length and number_limit must be int""" )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""" )
    # the counter for the chains with the exact desired length
    lowerCamelCase__ = 0
    # the cached sizes of the previous chains
    lowerCamelCase__ = {}
    for start_chain_element in range(1 , __lowerCAmelCase ):
        # The temporary set will contain the elements of the chain
        lowerCamelCase__ = set()
        lowerCamelCase__ = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        lowerCamelCase__ = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(__lowerCAmelCase )
            chain_set_length += 1
            lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        lowerCamelCase__ = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{solution()}')
9
1
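The chain-counting row above (Project Euler problem 74) is driven entirely by the digit-factorial-sum helper. A standalone sketch of that helper, with two well-known sanity checks:

from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Sum of the factorials of the decimal digits of ``number``."""
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


assert digit_factorial_sum(145) == 145      # 1! + 4! + 5! = 145, a fixed point
assert digit_factorial_sum(169) == 363601   # first step of the well-known 3-cycle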
'''simple docstring'''
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME

UpperCamelCase : Optional[Any] = ['small', 'medium', 'large']
UpperCamelCase : Dict = 'lm_head.decoder.weight'
UpperCamelCase : int = 'lm_head.weight'


def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
    lowerCamelCase__ = torch.load(__lowerCAmelCase )
    lowerCamelCase__ = d.pop(__lowerCAmelCase )
    os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
    torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )


if __name__ == "__main__":
    UpperCamelCase : Any = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    UpperCamelCase : Dict = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
        UpperCamelCase : str = F'./DialoGPT-{MODEL}'
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
9
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCamelCase : Optional[Any] = 'src/diffusers' # Matches is_xxx_available() UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') UpperCamelCase : Optional[int] = '\n{0} = None\n' UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None return "_and_".join(__lowerCAmelCase ) def A__ ( ): with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase__ = 0 lowerCamelCase__ = {} # Go through the end of the file while line_index < len(__lowerCAmelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase__ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1: lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = objects else: line_index += 1 return backend_specific_objects def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCAmelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase ) else: return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[int]=None ): if backend_specific_objects is None: lowerCamelCase__ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase__ = {} for backend, objects in backend_specific_objects.items(): lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] ) lowerCamelCase__ = dummy_file return dummy_files def A__ ( __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase__ = {"""torch""": """pt"""} # 
Locate actual dummy modules and read their content. lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" ) lowerCamelCase__ = { backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase__ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase : Any = parser.parse_args() check_dummies(args.fix_and_overwrite)
9
1
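The DialoGPT conversion row boils down to renaming one key in a saved state dict before writing it back out. A deobfuscated sketch of that pattern; the variable names here are assumptions, since the row's renaming loses the original bindings:

import os

import torch

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def rename_checkpoint_key(checkpoint_path: str, dump_folder: str) -> None:
    # Load the raw state dict, move the tensor under the new key, save it back.
    state_dict = torch.load(checkpoint_path)
    state_dict[NEW_KEY] = state_dict.pop(OLD_KEY)
    os.makedirs(dump_folder, exist_ok=True)
    torch.save(state_dict, os.path.join(dump_folder, "pytorch_model.bin"))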
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=a )
class UpperCamelCase__ (a ):
    '''simple docstring'''
    _UpperCamelCase = field(default='automatic-speech-recognition' ,metadata={'include_in_asdict_even_if_is_default': True} )
    _UpperCamelCase = Features({'audio': Audio()} )
    _UpperCamelCase = Features({'transcription': Value('string' )} )
    _UpperCamelCase = "audio"
    _UpperCamelCase = "transcription"

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if self.audio_column not in features:
            raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] ,_lowerCAmelCase ):
            raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
        lowerCamelCase__ = copy.deepcopy(self )
        lowerCamelCase__ = self.input_schema.copy()
        lowerCamelCase__ = features[self.audio_column]
        lowerCamelCase__ = input_schema
        return task_template

    @property
    def UpperCamelCase_ ( self ):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
9
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = GPTSwaTokenizer _UpperCamelCase = False _UpperCamelCase = True _UpperCamelCase = False def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """This is a test""" lowerCamelCase__ = """This is a test""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = """<s>""" lowerCamelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""j""" ) self.assertEqual(len(_lowerCAmelCase ) ,20_00 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,20_00 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,) # fmt: on lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""] lowerCamelCase__ = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) # Test that decode_fast returns the input text for 
text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
9
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

UpperCamelCase : int = {'processing_layoutxlm': ['LayoutXLMProcessor']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : List[str] = ['LayoutXLMTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Tuple = ['LayoutXLMTokenizerFast']

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
'''simple docstring''' from manim import * class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 ) lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""CPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(1 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""GPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase ) gpu.set_x(gpu.get_x() - 1 ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""Model""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.play( Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,) lowerCamelCase__ = MarkupText( F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,) lowerCamelCase__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase__ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] for i, rect in enumerate(_lowerCAmelCase ): lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 ) cpu_target.move_to(_lowerCAmelCase ) cpu_target.generate_target() lowerCamelCase__ = 0.46 / 4 lowerCamelCase__ = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 ) cpu_targs.append(_lowerCAmelCase ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) ) second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) ) self.play(*_lowerCAmelCase ) self.play(*_lowerCAmelCase ) self.wait()
9
1
'''simple docstring''' from manim import * class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 ) lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""CPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(1 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""GPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase ) gpu.set_x(gpu.get_x() - 1 ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""Model""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.play( Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,) lowerCamelCase__ = MarkupText( F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,) lowerCamelCase__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase__ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] for i, rect in enumerate(_lowerCAmelCase ): lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 ) cpu_target.move_to(_lowerCAmelCase ) cpu_target.generate_target() lowerCamelCase__ = 0.46 / 4 lowerCamelCase__ = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 ) cpu_targs.append(_lowerCAmelCase ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) ) second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) ) self.play(*_lowerCAmelCase ) self.play(*_lowerCAmelCase ) self.wait()
9
'''simple docstring'''
UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def A__ ( __lowerCAmelCase : int ):
    lowerCamelCase__ = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
UpperCamelCase : list[bool | None] = [None] * 10_00_00_00
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = False


def A__ ( __lowerCAmelCase : int ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) )
    lowerCamelCase__ = number_chain
    while number < 1000_0000:
        lowerCamelCase__ = number_chain
        number *= 10
    return number_chain


def A__ ( __lowerCAmelCase : int = 1000_0000 ):
    for i in range(1 , __lowerCAmelCase ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(__lowerCAmelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{solution() = }')
9
1
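The squared-digit-chain row above (Project Euler problem 92) caches chain endpoints in a ten-million-entry array. The uncached core it accelerates looks roughly like this; every positive integer's chain provably settles at either 1 or 89, so the loop terminates:

def squared_digit_chain_ends_at_89(number: int) -> bool:
    """Follow n -> sum of squared digits until the chain settles at 1 or 89."""
    while number != 1 and number != 89:
        number = sum(int(digit) ** 2 for digit in str(number))
    return number == 89


assert squared_digit_chain_ends_at_89(85)      # 85 -> 64 + 25 = 89
assert not squared_digit_chain_ends_at_89(44)  # 44 -> 32 -> 13 -> 10 -> 1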
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def A__ ( __lowerCAmelCase : List[str] ):
    lowerCamelCase__ = []
    for line in lines:
        lowerCamelCase__ = re.sub(R"""#.*""" , """""" , __lowerCAmelCase )  # remove comments
        if line:
            filtered_lines.append(__lowerCAmelCase )
    lowerCamelCase__ = """\n""".join(__lowerCAmelCase )
    # Make a hash from all this code
    lowerCamelCase__ = full_str.encode("""utf-8""" )
    return shaaaa(__lowerCAmelCase ).hexdigest()


# get importable module names and hash for caching
UpperCamelCase : Optional[Any] = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
UpperCamelCase : List[str] = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

UpperCamelCase : Dict = {'imagefolder', 'audiofolder'}

# Used to filter data files based on extensions given a module name
UpperCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
9
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging

UpperCamelCase : Tuple = logging.get_logger(__name__)

UpperCamelCase : List[str] = {
    'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class UpperCamelCase__ (a ):
    '''simple docstring'''
    _UpperCamelCase = 'donut-swin'
    _UpperCamelCase = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[3, 6, 12, 24] ,_lowerCAmelCase=7 ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,**_lowerCAmelCase ,):
        super().__init__(**_lowerCAmelCase )
        lowerCamelCase__ = image_size
        lowerCamelCase__ = patch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = embed_dim
        lowerCamelCase__ = depths
        lowerCamelCase__ = len(_lowerCAmelCase )
        lowerCamelCase__ = num_heads
        lowerCamelCase__ = window_size
        lowerCamelCase__ = mlp_ratio
        lowerCamelCase__ = qkv_bias
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = drop_path_rate
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = use_absolute_embeddings
        lowerCamelCase__ = layer_norm_eps
        lowerCamelCase__ = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        lowerCamelCase__ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
9
1
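Editorial note: the first cell of the record above caches importable dataset builders by hashing their source after stripping `#` comments, so the cache key changes whenever the module's code changes. A minimal sketch of that idea — `hash_python_lines` is an illustrative name, and `hashlib.sha256` is assumed to be the digest behind the cell's renamed import:

import inspect
import re
from hashlib import sha256

def hash_python_lines(lines):  # hypothetical helper, mirrors the cell's logic
    filtered = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # drop comments, as the cell does
        if line:
            filtered.append(line)
    return sha256("\n".join(filtered).encode("utf-8")).hexdigest()

# Usage: hash this function's own source; any code edit changes the key.
print(hash_python_lines(inspect.getsource(hash_python_lines).splitlines()))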
'''simple docstring''' from __future__ import annotations def A__ ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , ): if (stress, tangential_force, area).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif stress < 0: raise ValueError("""Stress cannot be negative""" ) elif tangential_force < 0: raise ValueError("""Tangential Force cannot be negative""" ) elif area < 0: raise ValueError("""Area cannot be negative""" ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
9
'''simple docstring''' import argparse import os import torch from transformers.utils import WEIGHTS_NAME UpperCamelCase : Optional[Any] = ['small', 'medium', 'large'] UpperCamelCase : Dict = 'lm_head.decoder.weight' UpperCamelCase : int = 'lm_head.weight' def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ): lowerCamelCase__ = torch.load(__lowerCAmelCase ) lowerCamelCase__ = d.pop(__lowerCAmelCase ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) UpperCamelCase : Dict = parser.parse_args() for MODEL in DIALOGPT_MODELS: UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl') UpperCamelCase : str = F'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
9
1
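Editorial note: the stress calculator in the record above rests on the shear relation tau = F / A — given any two of stress, tangential force, and area, the third follows, which is exactly the three-way branch the cell implements. A worked numeric example with illustrative values:

tangential_force = 25.0  # N (illustrative)
area = 5.0               # m^2 (illustrative)
stress = tangential_force / area
print(stress)                     # 5.0 Pa
print(stress * area)              # recovers the force: 25.0 N
print(tangential_force / stress)  # recovers the area: 5.0 m^2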
'''simple docstring''' UpperCamelCase : Optional[Any] = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } def A__ ( __lowerCAmelCase : dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : int ): lowerCamelCase__ = set() # keep track of all the paths to be checked lowerCamelCase__ = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue lowerCamelCase__ = queue.pop(0 ) # get the last node from the path lowerCamelCase__ = path[-1] if node not in explored: lowerCamelCase__ = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: lowerCamelCase__ = list(__lowerCAmelCase ) new_path.append(__lowerCAmelCase ) queue.append(__lowerCAmelCase ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(__lowerCAmelCase ) # in case there's no path between the 2 nodes return [] def A__ ( __lowerCAmelCase : dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ): if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 lowerCamelCase__ = [start] lowerCamelCase__ = set(__lowerCAmelCase ) # Keep tab on distances from `start` node. lowerCamelCase__ = {start: 0, target: -1} while queue: lowerCamelCase__ = queue.pop(0 ) if node == target: lowerCamelCase__ = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(__lowerCAmelCase ) queue.append(__lowerCAmelCase ) lowerCamelCase__ = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
9
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = mask_ratio lowerCamelCase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase__ = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) lowerCamelCase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 
model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = outputs_dict[0].numpy() lowerCamelCase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCAmelCase ): lowerCamelCase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCAmelCase ): lowerCamelCase__ = v.numpy() else: lowerCamelCase__ = np.array(_lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.constant(_lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ = tf_noise super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase ) } lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase__ = main_layer_class(_lowerCAmelCase ) lowerCamelCase__ = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) ) lowerCamelCase__ = model(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" ) model.save(_lowerCAmelCase ) lowerCamelCase__ = tf.keras.models.load_model( _lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCAmelCase ,tf.keras.Model ) lowerCamelCase__ = model(_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = outputs.last_hidden_state.numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = outputs.logits.numpy() lowerCamelCase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase ) lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = after_outputs["""logits"""].numpy() lowerCamelCase__ = 0 lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase ,1E-5 ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCAmelCase ) lowerCamelCase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase__ = 
model_class.from_config(model.config ) lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase_ ( self ): pass @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ = ViTMAEConfig() lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
9
1
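Editorial note: the first cell of the record above is a breadth-first path search over an adjacency-list graph. The sketch below restates the core loop with illustrative names (`bfs_path`; `deque` for O(1) pops instead of `list.pop(0)`) and reproduces the `['G', 'C', 'A', 'B', 'D']` result from the cell's own demo graph:

from collections import deque

demo_graph = {
    "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"],
    "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"],
}

def bfs_path(graph, start, goal):
    if start == goal:
        return [start]
    seen = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()            # explore oldest partial path first
        for neighbour in graph[path[-1]]:
            if neighbour == goal:
                return path + [neighbour]  # first hit is a shortest path
            if neighbour not in seen:
                seen.add(neighbour)
                queue.append(path + [neighbour])
    return []  # no path between the two nodes

print(bfs_path(demo_graph, "G", "D"))  # ['G', 'C', 'A', 'B', 'D']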
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = BlipImageProcessor() lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase 
,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
9
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,): lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18} lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = do_center_crop lowerCamelCase__ = crop_size lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean lowerCamelCase__ = image_std def UpperCamelCase_ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = LevitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): lowerCamelCase__ = LevitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
9
1
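Editorial note: the processor tests in the record above hinge on a save/load round trip — a composed `BlipProcessor` written with `save_pretrained` should come back intact via `AutoProcessor.from_pretrained`. A hedged sketch of that pattern, using only calls that appear in the cell; it assumes hub access for the tiny tokenizer checkpoint the cell itself names:

import tempfile
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor

tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
processor = BlipProcessor(image_processor=BlipImageProcessor(), tokenizer=tokenizer)

with tempfile.TemporaryDirectory() as tmpdir:
    processor.save_pretrained(tmpdir)             # write config + vocab to disk
    reloaded = AutoProcessor.from_pretrained(tmpdir)  # reconstruct the pair

inputs = reloaded(text="lower newer")
print(sorted(inputs.keys()))  # text-only input yields input_ids / attention_mask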
'''simple docstring''' import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem UpperCamelCase : int = importlib.util.find_spec('s3fs') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 UpperCamelCase : List[compression.BaseCompressedFileFileSystem] = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def A__ ( __lowerCAmelCase : str ): if "://" in dataset_path: lowerCamelCase__ = dataset_path.split("""://""" )[1] return dataset_path def A__ ( __lowerCAmelCase : fsspec.AbstractFileSystem ): if fs is not None and fs.protocol != "file": return True else: return False def A__ ( __lowerCAmelCase : fsspec.AbstractFileSystem , __lowerCAmelCase : str , __lowerCAmelCase : str ): lowerCamelCase__ = not is_remote_filesystem(__lowerCAmelCase ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__lowerCAmelCase ) , fs._strip_protocol(__lowerCAmelCase ) ) else: fs.mv(__lowerCAmelCase , __lowerCAmelCase , recursive=__lowerCAmelCase ) def A__ ( ): if hasattr(fsspec.asyn , """reset_lock""" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = threading.Lock()
9
'''simple docstring''' import numpy # List of input, output pairs UpperCamelCase : List[Any] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) UpperCamelCase : int = [2, 4, 1, 5] UpperCamelCase : int = len(train_data) UpperCamelCase : Dict = 0.009 def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ): return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output( __lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = 0 for i in range(len(__lowerCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ): lowerCamelCase__ = 0 for i in range(__lowerCAmelCase ): if index == -1: summation_value += _error(__lowerCAmelCase ) else: summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index] return summation_value def A__ ( __lowerCAmelCase : List[Any] ): lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m return cost_derivative_value def A__ ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCamelCase__ = 0.00_0002 lowerCamelCase__ = 0 lowerCamelCase__ = 0 while True: j += 1 lowerCamelCase__ = [0, 0, 0, 0] for i in range(0 , len(__lowerCAmelCase ) ): lowerCamelCase__ = get_cost_derivative(i - 1 ) lowerCamelCase__ = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ): break lowerCamelCase__ = temp_parameter_vector print(("""Number of iterations:""", j) ) def A__ ( ): for i in range(len(__lowerCAmelCase ) ): print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
9
1
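Editorial note: the gradient-descent cell in the record above applies the standard update theta_j <- theta_j - alpha * dJ/dtheta_j until consecutive parameter vectors agree within a tolerance. A one-parameter illustration of the same rule, with toy numbers rather than the cell's data:

alpha = 0.1   # learning rate, cf. LEARNING_RATE in the cell
theta = 0.0   # single parameter, fitting y = theta * x to the point (x, y) = (2, 4)
for _ in range(50):
    grad = (theta * 2 - 4) * 2  # dJ/dtheta for J = (theta*x - y)**2 / 2 at x=2, y=4
    theta -= alpha * grad        # the descent step
print(round(theta, 4))  # -> 2.0, the minimiser of J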
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging UpperCamelCase : Optional[Any] = logging.get_logger(__name__) UpperCamelCase : str = {'vocab_file': 'vocab.txt'} UpperCamelCase : int = { 'vocab_file': { 'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt', 'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt', }, } UpperCamelCase : Dict = { 'facebook/esm2_t6_8M_UR50D': 10_24, 'facebook/esm2_t12_35M_UR50D': 10_24, } def A__ ( __lowerCAmelCase : List[str] ): with open(__lowerCAmelCase , """r""" ) as f: lowerCamelCase__ = f.read().splitlines() return [l.strip() for l in lines] class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<cls>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase="<eos>" ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = load_vocab_file(_lowerCAmelCase ) lowerCamelCase__ = dict(enumerate(self.all_tokens ) ) lowerCamelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )} lowerCamelCase__ = unk_token lowerCamelCase__ = cls_token lowerCamelCase__ = pad_token lowerCamelCase__ = mask_token lowerCamelCase__ = eos_token lowerCamelCase__ = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self._id_to_token.get(_lowerCAmelCase ,self.unk_token ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self._token_to_id.get(_lowerCAmelCase ,self._token_to_id.get(self.unk_token ) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,**_lowerCAmelCase ): return text.split() def UpperCamelCase_ ( self ,_lowerCAmelCase=False ): return len(self._id_to_token ) def UpperCamelCase_ ( self ): return {token: i for i, token in enumerate(self.all_tokens )} def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self._token_to_id.get(_lowerCAmelCase ,self._token_to_id.get(self.unk_token ) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self._id_to_token.get(_lowerCAmelCase ,self.unk_token ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] lowerCamelCase__ = [1] + ([0] * len(_lowerCAmelCase )) + [1] if token_ids_a is not None: mask += [0] * len(_lowerCAmelCase ) + [1] return mask def UpperCamelCase_ ( 
self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = os.path.join(_lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" ) with open(_lowerCAmelCase ,"""w""" ) as f: f.write("""\n""".join(self.all_tokens ) ) return (vocab_file,) @property def UpperCamelCase_ ( self ): return self.get_vocab_size(with_added_tokens=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ): return super()._add_tokens(_lowerCAmelCase ,special_tokens=_lowerCAmelCase )
9
'''simple docstring''' import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase ) lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""] lowerCamelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCamelCase__ = {} lowerCamelCase__ = """first_stage_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCamelCase__ = {} lowerCamelCase__ = """model.diffusion_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] lowerCamelCase__ = config.model.params.first_stage_config.params lowerCamelCase__ = config.model.params.unet_config.params lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval() vqvae.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval() unet.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , ) lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) pipeline.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) UpperCamelCase : List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
9
1
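Editorial note: the ESM tokenizer in the record above lays out special tokens as [CLS] + tokens + [EOS] for a single sequence, and [CLS] + a + [EOS] + b + [EOS] for a pair (there is no separate SEP in the ESM vocabulary, so EOS plays that role). The same layout in miniature, with made-up token ids:

CLS, EOS = 0, 2  # illustrative ids standing in for <cls> and <eos>

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [EOS]
    return [CLS] + ids_a + [EOS] + ids_b + [EOS]  # pairs always end in EOS

print(build_inputs([5, 6, 7]))       # [0, 5, 6, 7, 2]
print(build_inputs([5, 6], [8, 9]))  # [0, 5, 6, 2, 8, 9, 2]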
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets UpperCamelCase : Optional[int] = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' UpperCamelCase : Dict = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n' UpperCamelCase : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n' def A__ ( __lowerCAmelCase : Optional[int] ): def remove_articles(__lowerCAmelCase : Optional[Any] ): lowerCamelCase__ = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE ) return re.sub(__lowerCAmelCase , """ """ , __lowerCAmelCase ) def white_space_fix(__lowerCAmelCase : int ): return " ".join(text.split() ) def remove_punc(__lowerCAmelCase : Optional[Any] ): lowerCamelCase__ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__lowerCAmelCase : str ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : int ): return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : List[str] ): lowerCamelCase__ = [any(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for ref in refs ) for pred, refs in zip(__lowerCAmelCase , __lowerCAmelCase )] return (sum(__lowerCAmelCase ) / len(__lowerCAmelCase )) * 100 def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ): lowerCamelCase__ = [rgram for rgrams in rgramslist for rgram in rgrams] lowerCamelCase__ = Counter(__lowerCAmelCase ) lowerCamelCase__ = Counter(__lowerCAmelCase ) lowerCamelCase__ = 
Counter() for sgram, scount in sgramcounter.items(): lowerCamelCase__ = scount * numref lowerCamelCase__ = Counter(__lowerCAmelCase ) lowerCamelCase__ = Counter() for cgram, ccount in cgramcounter.items(): lowerCamelCase__ = ccount * numref # KEEP lowerCamelCase__ = sgramcounter_rep & cgramcounter_rep lowerCamelCase__ = keepgramcounter_rep & rgramcounter lowerCamelCase__ = sgramcounter_rep & rgramcounter lowerCamelCase__ = 0 lowerCamelCase__ = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. lowerCamelCase__ = 1 lowerCamelCase__ = 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = keeptmpscorea / len(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) lowerCamelCase__ = keeptmpscorea / sum(keepgramcounterall_rep.values() ) lowerCamelCase__ = 0 if keepscore_precision > 0 or keepscore_recall > 0: lowerCamelCase__ = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION lowerCamelCase__ = sgramcounter_rep - cgramcounter_rep lowerCamelCase__ = delgramcounter_rep - rgramcounter lowerCamelCase__ = sgramcounter_rep - rgramcounter lowerCamelCase__ = 0 lowerCamelCase__ = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. lowerCamelCase__ = 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = deltmpscorea / len(__lowerCAmelCase ) # ADDITION lowerCamelCase__ = set(__lowerCAmelCase ) - set(__lowerCAmelCase ) lowerCamelCase__ = set(__lowerCAmelCase ) & set(__lowerCAmelCase ) lowerCamelCase__ = set(__lowerCAmelCase ) - set(__lowerCAmelCase ) lowerCamelCase__ = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
lowerCamelCase__ = 1 lowerCamelCase__ = 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = addtmpscore / len(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = addtmpscore / len(__lowerCAmelCase ) lowerCamelCase__ = 0 if addscore_precision > 0 or addscore_recall > 0: lowerCamelCase__ = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : int ): lowerCamelCase__ = len(__lowerCAmelCase ) lowerCamelCase__ = ssent.split(""" """ ) lowerCamelCase__ = csent.split(""" """ ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] for rsent in rsents: lowerCamelCase__ = rsent.split(""" """ ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] ragramslist.append(__lowerCAmelCase ) for i in range(0 , len(__lowerCAmelCase ) - 1 ): if i < len(__lowerCAmelCase ) - 1: lowerCamelCase__ = ragrams[i] + """ """ + ragrams[i + 1] ragrams.append(__lowerCAmelCase ) if i < len(__lowerCAmelCase ) - 2: lowerCamelCase__ = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] ragrams.append(__lowerCAmelCase ) if i < len(__lowerCAmelCase ) - 3: lowerCamelCase__ = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3] ragrams.append(__lowerCAmelCase ) ragramslist.append(__lowerCAmelCase ) ragramslist.append(__lowerCAmelCase ) ragramslist.append(__lowerCAmelCase ) for i in range(0 , len(__lowerCAmelCase ) - 1 ): if i < len(__lowerCAmelCase ) - 1: lowerCamelCase__ = sagrams[i] + """ """ + sagrams[i + 1] sagrams.append(__lowerCAmelCase ) if i < len(__lowerCAmelCase ) - 2: lowerCamelCase__ = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] sagrams.append(__lowerCAmelCase ) if i < len(__lowerCAmelCase ) - 3: lowerCamelCase__ = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3] sagrams.append(__lowerCAmelCase ) for i in range(0 , len(__lowerCAmelCase ) - 1 ): if i < len(__lowerCAmelCase ) - 1: lowerCamelCase__ = cagrams[i] + """ """ + cagrams[i + 1] cagrams.append(__lowerCAmelCase ) if i < len(__lowerCAmelCase ) - 2: lowerCamelCase__ = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] cagrams.append(__lowerCAmelCase ) if i < len(__lowerCAmelCase ) - 3: lowerCamelCase__ = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3] cagrams.append(__lowerCAmelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 lowerCamelCase__ = sum([delascore, delascore, delascore, delascore] ) / 4 lowerCamelCase__ = sum([addascore, addascore, addascore, addascore] ) / 4 lowerCamelCase__ = (avgkeepscore + avgdelscore + 
avgaddscore) / 3 return finalscore def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : bool = True , __lowerCAmelCase : str = "13a" , __lowerCAmelCase : bool = True ): # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: lowerCamelCase__ = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: lowerCamelCase__ = sacrebleu.metrics.bleu._get_tokenizer(__lowerCAmelCase )()(__lowerCAmelCase ) else: lowerCamelCase__ = sacrebleu.TOKENIZERS[tokenizer]()(__lowerCAmelCase ) elif tokenizer == "moses": lowerCamelCase__ = sacremoses.MosesTokenizer().tokenize(__lowerCAmelCase , return_str=__lowerCAmelCase , escape=__lowerCAmelCase ) elif tokenizer == "penn": lowerCamelCase__ = sacremoses.MosesTokenizer().penn_tokenize(__lowerCAmelCase , return_str=__lowerCAmelCase ) else: lowerCamelCase__ = sentence if not return_str: lowerCamelCase__ = normalized_sent.split() return normalized_sent def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ): if not (len(__lowerCAmelCase ) == len(__lowerCAmelCase ) == len(__lowerCAmelCase )): raise ValueError("""Sources length must match predictions and references lengths.""" ) lowerCamelCase__ = 0 for src, pred, refs in zip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): sari_score += SARIsent(normalize(__lowerCAmelCase ) , normalize(__lowerCAmelCase ) , [normalize(__lowerCAmelCase ) for sent in refs] ) lowerCamelCase__ = sari_score / len(__lowerCAmelCase ) return 100 * sari_score def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="exp" , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : str=False , ): lowerCamelCase__ = len(references[0] ) if any(len(__lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError("""Sacrebleu requires the same number of references for each prediction""" ) lowerCamelCase__ = [[refs[i] for refs in references] for i in range(__lowerCAmelCase )] lowerCamelCase__ = sacrebleu.corpus_bleu( __lowerCAmelCase , __lowerCAmelCase , smooth_method=__lowerCAmelCase , smooth_value=__lowerCAmelCase , force=__lowerCAmelCase , lowercase=__lowerCAmelCase , use_effective_order=__lowerCAmelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCamelCase__ (datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ,id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" ,id="""sequence""" ) ,id="""references""" ), } ) ,codebase_urls=[ """https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""", """https://github.com/cocoxu/simplification/blob/master/SARI.py""", 
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""", """https://github.com/mjpost/sacreBLEU""", ] ,reference_urls=[ """https://www.aclweb.org/anthology/Q16-1029.pdf""", """https://github.com/mjpost/sacreBLEU""", """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = {} result.update({"""sari""": compute_sari(sources=_lowerCAmelCase ,predictions=_lowerCAmelCase ,references=_lowerCAmelCase )} ) result.update({"""sacrebleu""": compute_sacrebleu(predictions=_lowerCAmelCase ,references=_lowerCAmelCase )} ) result.update({"""exact""": compute_em(predictions=_lowerCAmelCase ,references=_lowerCAmelCase )} ) return result
9
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ): lowerCamelCase__ = "" lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ): lowerCamelCase__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__lowerCAmelCase ) return decoded def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = [] for key in product(__lowerCAmelCase , repeat=3 ): lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase ) if encoded is not None: possibles.append(__lowerCAmelCase ) return possibles def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ): return [possible for possible in possibles if common_word in possible.lower()] def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" ) lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )] lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase ) for common_word in COMMON_WORDS: lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) == 1: break lowerCamelCase__ = possibles[0] return sum(ord(__lowerCAmelCase ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
9
1
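Editorial note: the cipher cell in the record above (Project Euler 59) brute-forces a three-letter lowercase key and decrypts by XOR-ing the key cyclically over the byte stream. The decrypt primitive as a standalone round trip — key and plaintext here are illustrative:

from itertools import cycle

def xor_cycle(data: bytes, key: bytes) -> bytes:
    return bytes(b ^ k for b, k in zip(data, cycle(key)))

plaintext = b"the quick brown fox"  # illustrative
key = b"god"                        # any three lowercase letters, as in the puzzle
ciphertext = xor_cycle(plaintext, key)
assert xor_cycle(ciphertext, key) == plaintext  # XOR with the same key is self-inverse
print(ciphertext.hex())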
'''simple docstring''' import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'Speech2TextFeatureExtractor' _UpperCamelCase = 'Speech2TextTokenizer' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ): super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self.feature_extractor lowerCamelCase__ = False def __call__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowerCAmelCase ,**_lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" ) lowerCamelCase__ = kwargs.pop("""raw_speech""" ) else: lowerCamelCase__ = kwargs.pop("""audio""" ,_lowerCAmelCase ) lowerCamelCase__ = kwargs.pop("""sampling_rate""" ,_lowerCAmelCase ) lowerCamelCase__ = kwargs.pop("""text""" ,_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: lowerCamelCase__ = args[0] lowerCamelCase__ = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: lowerCamelCase__ = self.feature_extractor(_lowerCAmelCase ,*_lowerCAmelCase ,sampling_rate=_lowerCAmelCase ,**_lowerCAmelCase ) if text is not None: lowerCamelCase__ = self.tokenizer(_lowerCAmelCase ,**_lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: lowerCamelCase__ = encodings["""input_ids"""] return inputs def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase ,**_lowerCAmelCase ) @contextmanager def UpperCamelCase_ ( self ): warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your audio inputs, or in a separate call.""" ) lowerCamelCase__ = True lowerCamelCase__ = self.tokenizer yield lowerCamelCase__ = self.feature_extractor lowerCamelCase__ = False
9
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = data # Initialize hash values lowerCamelCase__ = [ 0x6a_09_e6_67, 0xbb_67_ae_85, 0x3c_6e_f3_72, 0xa5_4f_f5_3a, 0x51_0e_52_7f, 0x9b_05_68_8c, 0x1f_83_d9_ab, 0x5b_e0_cd_19, ] # Initialize round constants lowerCamelCase__ = [ 0x42_8a_2f_98, 0x71_37_44_91, 0xb5_c0_fb_cf, 0xe9_b5_db_a5, 0x39_56_c2_5b, 0x59_f1_11_f1, 0x92_3f_82_a4, 0xab_1c_5e_d5, 0xd8_07_aa_98, 0x12_83_5b_01, 0x24_31_85_be, 0x55_0c_7d_c3, 0x72_be_5d_74, 0x80_de_b1_fe, 0x9b_dc_06_a7, 0xc1_9b_f1_74, 0xe4_9b_69_c1, 0xef_be_47_86, 0x0f_c1_9d_c6, 0x24_0c_a1_cc, 0x2d_e9_2c_6f, 0x4a_74_84_aa, 0x5c_b0_a9_dc, 0x76_f9_88_da, 0x98_3e_51_52, 0xa8_31_c6_6d, 0xb0_03_27_c8, 0xbf_59_7f_c7, 0xc6_e0_0b_f3, 0xd5_a7_91_47, 0x06_ca_63_51, 0x14_29_29_67, 0x27_b7_0a_85, 0x2e_1b_21_38, 0x4d_2c_6d_fc, 0x53_38_0d_13, 0x65_0a_73_54, 0x76_6a_0a_bb, 0x81_c2_c9_2e, 0x92_72_2c_85, 0xa2_bf_e8_a1, 0xa8_1a_66_4b, 0xc2_4b_8b_70, 0xc7_6c_51_a3, 0xd1_92_e8_19, 0xd6_99_06_24, 0xf4_0e_35_85, 0x10_6a_a0_70, 0x19_a4_c1_16, 0x1e_37_6c_08, 0x27_48_77_4c, 0x34_b0_bc_b5, 0x39_1c_0c_b3, 0x4e_d8_aa_4a, 0x5b_9c_ca_4f, 0x68_2e_6f_f3, 0x74_8f_82_ee, 0x78_a5_63_6f, 0x84_c8_78_14, 0x8c_c7_02_08, 0x90_be_ff_fa, 0xa4_50_6c_eb, 0xbe_f9_a3_f7, 0xc6_71_78_f2, ] lowerCamelCase__ = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64)) lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) ) return data + padding + big_endian_integer def UpperCamelCase_ ( self ): # Convert into blocks of 64 bytes lowerCamelCase__ = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array lowerCamelCase__ = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) lowerCamelCase__ = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) lowerCamelCase__ = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 ) lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g) lowerCamelCase__ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 ) lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c) lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) lowerCamelCase__ = [a, b, c, d, e, f, g, h] # Modify final values lowerCamelCase__ = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes ) ] lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): import hashlib lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() ) def A__ ( ): import doctest doctest.testmod() lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" ) print(SHAaaa(__lowerCAmelCase ).hash ) if __name__ == "__main__": main()
9
1
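The pure-Python SHA-256 class above is checked against hashlib in its unit test; the same sanity check can be run standalone with only the standard library:

import hashlib

# Reference digest for the same input the unit test above uses.
print(hashlib.sha256(b"Test String").hexdigest())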
'''simple docstring''' import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml UpperCamelCase : Optional[Any] = NewType('DataClass', Any) UpperCamelCase : Dict = NewType('DataClassType', Any) def A__ ( __lowerCAmelCase : List[str] ): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' ) def A__ ( __lowerCAmelCase : list ): lowerCamelCase__ = {str(__lowerCAmelCase ): choice for choice in choices} return lambda __lowerCAmelCase : str_to_choice.get(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( *, __lowerCAmelCase : Union[str, List[str]] = None , __lowerCAmelCase : str = None , __lowerCAmelCase : Any = dataclasses.MISSING , __lowerCAmelCase : Callable[[], Any] = dataclasses.MISSING , __lowerCAmelCase : dict = None , **__lowerCAmelCase : Union[str, Any] , ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls lowerCamelCase__ = {} if aliases is not None: lowerCamelCase__ = aliases if help is not None: lowerCamelCase__ = help return dataclasses.field(metadata=__lowerCAmelCase , default=__lowerCAmelCase , default_factory=__lowerCAmelCase , **__lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 42 def __init__( self ,_lowerCAmelCase ,**_lowerCAmelCase ): # To make the default appear when using --help if "formatter_class" not in kwargs: lowerCamelCase__ = ArgumentDefaultsHelpFormatter super().__init__(**_lowerCAmelCase ) if dataclasses.is_dataclass(_lowerCAmelCase ): lowerCamelCase__ = [dataclass_types] lowerCamelCase__ = list(_lowerCAmelCase ) for dtype in self.dataclass_types: self._add_dataclass_arguments(_lowerCAmelCase ) @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = F'''--{field.name}''' lowerCamelCase__ = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type ,_lowerCAmelCase ): raise RuntimeError( """Unresolved type detected, which should have been done with the help of """ """`typing.get_type_hints` method by default""" ) lowerCamelCase__ = kwargs.pop("""aliases""" ,[] ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [aliases] lowerCamelCase__ = getattr(field.type ,"""__origin__""" ,field.type ) if origin_type is Union or (hasattr(_lowerCAmelCase ,"""UnionType""" ) and isinstance(_lowerCAmelCase ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(_lowerCAmelCase ) not in field.type.__args__ ): raise ValueError( """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because""" """ the argument parser only supports one type per argument.""" F''' Problem encountered in field \'{field.name}\'.''' ) if type(_lowerCAmelCase ) not in field.type.__args__: # filter `str` in Union lowerCamelCase__ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] lowerCamelCase__ = getattr(field.type ,"""__origin__""" ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) lowerCamelCase__ = ( field.type.__args__[0] if isinstance(_lowerCAmelCase ,field.type.__args__[1] ) else field.type.__args__[1] ) lowerCamelCase__ = getattr(field.type ,"""__origin__""" ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) lowerCamelCase__ = {} if origin_type is Literal or (isinstance(field.type ,_lowerCAmelCase ) and issubclass(field.type ,_lowerCAmelCase )): if origin_type is Literal: lowerCamelCase__ = field.type.__args__ else: lowerCamelCase__ = [x.value for x in field.type] lowerCamelCase__ = make_choice_type_function(kwargs["""choices"""] ) if field.default is not dataclasses.MISSING: lowerCamelCase__ = field.default else: lowerCamelCase__ = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument lowerCamelCase__ = copy(_lowerCAmelCase ) # Hack because type=bool in argparse does not behave as we want. lowerCamelCase__ = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
lowerCamelCase__ = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way lowerCamelCase__ = default # This tells argparse we accept 0 or 1 value after --field_name lowerCamelCase__ = """?""" # This is the value that will get picked if we do --field_name (without value) lowerCamelCase__ = True elif isclass(_lowerCAmelCase ) and issubclass(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = field.type.__args__[0] lowerCamelCase__ = """+""" if field.default_factory is not dataclasses.MISSING: lowerCamelCase__ = field.default_factory() elif field.default is dataclasses.MISSING: lowerCamelCase__ = True else: lowerCamelCase__ = field.type if field.default is not dataclasses.MISSING: lowerCamelCase__ = field.default elif field.default_factory is not dataclasses.MISSING: lowerCamelCase__ = field.default_factory() else: lowerCamelCase__ = True parser.add_argument(_lowerCAmelCase ,*_lowerCAmelCase ,**_lowerCAmelCase ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): lowerCamelCase__ = False parser.add_argument(F'''--no_{field.name}''' ,action="""store_false""" ,dest=field.name ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if hasattr(_lowerCAmelCase ,"""_argument_group_name""" ): lowerCamelCase__ = self.add_argument_group(dtype._argument_group_name ) else: lowerCamelCase__ = self try: lowerCamelCase__ = get_type_hints(_lowerCAmelCase ) except NameError: raise RuntimeError( F'''Type resolution failed for {dtype}. Try declaring the class in global scope or ''' """removing line of `from __future__ import annotations` which opts in Postponed """ """Evaluation of Annotations (PEP 563)""" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_lowerCAmelCase ): lowerCamelCase__ = """.""".join(map(_lowerCAmelCase ,sys.version_info[:3] ) ) raise RuntimeError( F'''Type resolution failed for {dtype} on Python {python_version}. Try removing ''' """line of `from __future__ import annotations` which opts in union types as """ """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To """ """support Python versions that lower than 3.10, you need to use """ """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """ """`X | None`.""" ) from ex raise for field in dataclasses.fields(_lowerCAmelCase ): if not field.init: continue lowerCamelCase__ = type_hints[field.name] self._parse_dataclass_field(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): lowerCamelCase__ = [] if args_filename: args_files.append(Path(_lowerCAmelCase ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values lowerCamelCase__ = ArgumentParser() args_file_parser.add_argument(_lowerCAmelCase ,type=_lowerCAmelCase ,action="""append""" ) # Use only remaining args for further parsing (remove the args_file_flag) lowerCamelCase__ , lowerCamelCase__ = args_file_parser.parse_known_args(args=_lowerCAmelCase ) lowerCamelCase__ = vars(_lowerCAmelCase ).get(args_file_flag.lstrip("""-""" ) ,_lowerCAmelCase ) if cmd_args_file_paths: args_files.extend([Path(_lowerCAmelCase ) for p in cmd_args_file_paths] ) lowerCamelCase__ = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last lowerCamelCase__ = file_args + args if args is not None else file_args + sys.argv[1:] lowerCamelCase__ , lowerCamelCase__ = self.parse_known_args(args=_lowerCAmelCase ) lowerCamelCase__ = [] for dtype in self.dataclass_types: lowerCamelCase__ = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init} lowerCamelCase__ = {k: v for k, v in vars(_lowerCAmelCase ).items() if k in keys} for k in keys: delattr(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = dtype(**_lowerCAmelCase ) outputs.append(_lowerCAmelCase ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(_lowerCAmelCase ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' ) return (*outputs,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ): lowerCamelCase__ = set(args.keys() ) lowerCamelCase__ = [] for dtype in self.dataclass_types: lowerCamelCase__ = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init} lowerCamelCase__ = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) lowerCamelCase__ = dtype(**_lowerCAmelCase ) outputs.append(_lowerCAmelCase ) if not allow_extra_keys and unused_keys: raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(_lowerCAmelCase )}''' ) return tuple(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ): with open(Path(_lowerCAmelCase ) ,encoding="""utf-8""" ) as open_json_file: lowerCamelCase__ = json.loads(open_json_file.read() ) lowerCamelCase__ = self.parse_dict(_lowerCAmelCase ,allow_extra_keys=_lowerCAmelCase ) return tuple(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ): lowerCamelCase__ = self.parse_dict(yaml.safe_load(Path(_lowerCAmelCase ).read_text() ) ,allow_extra_keys=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
9
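The parser above maps dataclass fields onto argparse arguments (handling Optional, Literal, bool, and list fields, plus a `no_*` complement for booleans that default to True). A minimal usage sketch; the TrainArgs dataclass and flag values are made up for illustration, and the commented calls assume the upstream transformers HfArgumentParser API:

from dataclasses import dataclass, field

@dataclass
class TrainArgs:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    fp16: bool = field(default=False, metadata={"help": "Enable mixed precision."})

# With transformers installed, parsing would look like:
#   parser = HfArgumentParser(TrainArgs)
#   (train_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--fp16"])
#   assert train_args.fp16 and train_args.learning_rate == 1e-4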
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) lowerCamelCase__ = emb.weight.data return lin_layer def A__ ( __lowerCAmelCase : Dict ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] lowerCamelCase__ = mam_aaa["""model"""] remove_ignore_keys_(__lowerCAmelCase ) lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0] lowerCamelCase__ = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""] lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) lowerCamelCase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase : Tuple = parser.parse_args() UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
9
1
'''simple docstring''' import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase : List[Any] = logging.get_logger(__name__) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = ['input_values', 'attention_mask'] def __init__( self ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 1_60_00 ,_lowerCAmelCase = 0.0 ,_lowerCAmelCase = False ,_lowerCAmelCase = 80 ,_lowerCAmelCase = 16 ,_lowerCAmelCase = 64 ,_lowerCAmelCase = "hann_window" ,_lowerCAmelCase = 1.0 ,_lowerCAmelCase = 80 ,_lowerCAmelCase = 76_00 ,_lowerCAmelCase = 1E-10 ,_lowerCAmelCase = 2 ,_lowerCAmelCase = True ,**_lowerCAmelCase ,): super().__init__(feature_size=_lowerCAmelCase ,sampling_rate=_lowerCAmelCase ,padding_value=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = do_normalize lowerCamelCase__ = return_attention_mask lowerCamelCase__ = num_mel_bins lowerCamelCase__ = hop_length lowerCamelCase__ = win_length lowerCamelCase__ = win_function lowerCamelCase__ = frame_signal_scale lowerCamelCase__ = fmin lowerCamelCase__ = fmax lowerCamelCase__ = mel_floor lowerCamelCase__ = reduction_factor lowerCamelCase__ = win_length * sampling_rate // 10_00 lowerCamelCase__ = hop_length * sampling_rate // 10_00 lowerCamelCase__ = optimal_fft_length(self.sample_size ) lowerCamelCase__ = (self.n_fft // 2) + 1 lowerCamelCase__ = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=_lowerCAmelCase ) lowerCamelCase__ = mel_filter_bank( num_frequency_bins=self.n_freqs ,num_mel_filters=self.num_mel_bins ,min_frequency=self.fmin ,max_frequency=self.fmax ,sampling_rate=self.sampling_rate ,norm="""slaney""" ,mel_scale="""slaney""" ,) if frame_signal_scale != 1.0: warnings.warn( """The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" ,_lowerCAmelCase ,) if reduction_factor != 2.0: warnings.warn( """The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" ,_lowerCAmelCase ,) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def UpperCamelCase_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = 0.0 ): if attention_mask is not None: lowerCamelCase__ = np.array(_lowerCAmelCase ,np.intaa ) lowerCamelCase__ = [] for vector, length in zip(_lowerCAmelCase ,attention_mask.sum(-1 ) ): lowerCamelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: lowerCamelCase__ = padding_value normed_input_values.append(_lowerCAmelCase ) else: lowerCamelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def UpperCamelCase_ ( self ,_lowerCAmelCase ,): lowerCamelCase__ = spectrogram( _lowerCAmelCase ,window=self.window ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,mel_filters=self.mel_filters ,mel_floor=self.mel_floor ,log_mel="""log10""" ,) return log_mel_spec.T def __call__( self ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None 
,_lowerCAmelCase = None ,**_lowerCAmelCase ,): if audio is None and audio_target is None: raise ValueError("""You must provide either `audio` or `audio_target` values.""" ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if audio is not None: lowerCamelCase__ = self._process_audio( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ,) else: lowerCamelCase__ = None if audio_target is not None: lowerCamelCase__ = self._process_audio( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ,) if inputs is None: return inputs_target else: lowerCamelCase__ = inputs_target["""input_values"""] lowerCamelCase__ = inputs_target.get("""attention_mask""" ) if decoder_attention_mask is not None: lowerCamelCase__ = decoder_attention_mask return inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): lowerCamelCase__ = isinstance(_lowerCAmelCase ,np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowerCamelCase__ = is_batched_numpy or ( isinstance(_lowerCAmelCase ,(list, tuple) ) and (isinstance(speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_lowerCAmelCase ,np.ndarray ): lowerCamelCase__ = np.asarray(_lowerCAmelCase ,dtype=np.floataa ) elif isinstance(_lowerCAmelCase ,np.ndarray ) and speech.dtype is np.dtype(np.floataa ): lowerCamelCase__ = speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase__ = [speech] # needed to make pad() work on spectrogram inputs lowerCamelCase__ = self.feature_size # convert into correct format for padding if is_target: lowerCamelCase__ = [self._extract_mel_features(_lowerCAmelCase ) for waveform in speech] lowerCamelCase__ = BatchFeature({"""input_values""": features} ) lowerCamelCase__ = self.num_mel_bins else: lowerCamelCase__ = BatchFeature({"""input_values""": speech} ) lowerCamelCase__ = self.pad( _lowerCAmelCase ,padding=_lowerCAmelCase ,max_length=_lowerCAmelCase ,truncation=_lowerCAmelCase ,pad_to_multiple_of=_lowerCAmelCase ,return_attention_mask=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = feature_size_hack # convert input values to correct format lowerCamelCase__ = padded_inputs["""input_values"""] if not isinstance(input_values[0] ,np.ndarray ): lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.floataa ) for array in input_values] elif ( not isinstance(_lowerCAmelCase ,np.ndarray ) and isinstance(input_values[0] ,np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): lowerCamelCase__ = 
[array.astype(np.floataa ) for array in input_values] elif isinstance(_lowerCAmelCase ,np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): lowerCamelCase__ = input_values.astype(np.floataa ) # convert attention_mask to correct format lowerCamelCase__ = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: lowerCamelCase__ = [np.asarray(_lowerCAmelCase ,dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: lowerCamelCase__ = ( attention_mask if self._get_padding_strategies(_lowerCAmelCase ,max_length=_lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) lowerCamelCase__ = self.zero_mean_unit_var_norm( padded_inputs["""input_values"""] ,attention_mask=_lowerCAmelCase ,padding_value=self.padding_value ) if return_tensors is not None: lowerCamelCase__ = padded_inputs.convert_to_tensors(_lowerCAmelCase ) return padded_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = super().to_dict() # Don't serialize these as they are derived from the other properties. lowerCamelCase__ = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""] for name in names: if name in output: del output[name] return output
9
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = BlipImageProcessor() lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase 
,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
9
1
'''simple docstring''' from math import factorial, radians def A__ ( __lowerCAmelCase : float , __lowerCAmelCase : int = 18 , __lowerCAmelCase : int = 10 ): lowerCamelCase__ = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians lowerCamelCase__ = radians(__lowerCAmelCase ) lowerCamelCase__ = angle_in_radians lowerCamelCase__ = 3 lowerCamelCase__ = -1 for _ in range(__lowerCAmelCase ): result += (b * (angle_in_radians**a)) / factorial(__lowerCAmelCase ) lowerCamelCase__ = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": __import__('doctest').testmod()
9
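The routine above truncates the Maclaurin series sin(x) = x - x^3/3! + x^5/5! - ..., after reducing the angle modulo 360 degrees. An equivalent self-contained sketch (the function name is illustrative) that can be checked against math.sin:

import math

def maclaurin_sin(angle_in_degrees: float, terms: int = 18) -> float:
    # Reduce the angle, convert to radians, then sum the alternating series.
    x = math.radians(angle_in_degrees % 360.0)
    return sum((-1) ** k * x ** (2 * k + 1) / math.factorial(2 * k + 1) for k in range(terms))

assert abs(maclaurin_sin(30) - 0.5) < 1e-9
assert abs(maclaurin_sin(90) - math.sin(math.radians(90))) < 1e-9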
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True) os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True) os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True) def A__ ( __lowerCAmelCase : Union[str, Any] ): if hor == 128: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 64, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) lowerCamelCase__ = model.state_dict() lowerCamelCase__ = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_5536, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = { """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 128, 256), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_5536, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) lowerCamelCase__ = model lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , 
"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
9
1
'''simple docstring''' import heapq as hq import math from collections.abc import Iterator class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = str(id_ ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = [] lowerCamelCase__ = {} # {vertex:distance} def __lt__( self ,_lowerCAmelCase ): return self.key < other.key def __repr__( self ): return self.id def UpperCamelCase_ ( self ,_lowerCAmelCase ): self.neighbors.append(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = weight def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , __lowerCAmelCase ) graph[b - 1].add_edge(graph[a - 1] , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : Vertex ): lowerCamelCase__ = [] for u in graph: lowerCamelCase__ = math.inf lowerCamelCase__ = None lowerCamelCase__ = 0 lowerCamelCase__ = graph[:] while q: lowerCamelCase__ = min(__lowerCAmelCase ) q.remove(__lowerCAmelCase ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): lowerCamelCase__ = u lowerCamelCase__ = u.edges[v.id] for i in range(1 , len(__lowerCAmelCase ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : Vertex ): for u in graph: lowerCamelCase__ = math.inf lowerCamelCase__ = None lowerCamelCase__ = 0 lowerCamelCase__ = list(__lowerCAmelCase ) hq.heapify(__lowerCAmelCase ) while h: lowerCamelCase__ = hq.heappop(__lowerCAmelCase ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): lowerCamelCase__ = u lowerCamelCase__ = u.edges[v.id] hq.heapify(__lowerCAmelCase ) for i in range(1 , len(__lowerCAmelCase ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def A__ ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
9
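The file above builds Prim's algorithm around a Vertex class, once with a linear min-scan and once with heapq. A compact adjacency-dict sketch of the same heap-based idea (the graph representation and names are illustrative):

import heapq

def prim_mst(adj: dict, start) -> list:
    """Return MST edges for a connected, undirected weighted graph.

    adj maps node -> list of (weight, neighbor) pairs.
    """
    visited = {start}
    edges = [(w, start, v) for w, v in adj[start]]
    heapq.heapify(edges)
    mst = []
    while edges:
        w, u, v = heapq.heappop(edges)
        if v in visited:
            continue  # would create a cycle
        visited.add(v)
        mst.append((u, v, w))
        for w2, nxt in adj[v]:
            if nxt not in visited:
                heapq.heappush(edges, (w2, v, nxt))
    return mst

example = {1: [(1, 2), (3, 3)], 2: [(1, 1), (1, 3)], 3: [(3, 1), (1, 2)]}
print(prim_mst(example, 1))  # [(1, 2, 1), (2, 3, 1)]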
'''simple docstring''' import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,): lowerCamelCase__ = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } lowerCamelCase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowerCamelCase__ = token_dict["""token"""] lowerCamelCase__ = Tokenizer(Unigram() ) lowerCamelCase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ), normalizers.Lowercase(), ] ) lowerCamelCase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ), pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ), pre_tokenizers.Punctuation(), ] ) lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,) lowerCamelCase__ = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [files] self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ): lowerCamelCase__ = json.loads(self._tokenizer.to_str() ) lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""] lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
9
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : str = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'canine' def __init__( self ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1_63_84 ,_lowerCAmelCase=16 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=0 ,_lowerCAmelCase=0xe0_00 ,_lowerCAmelCase=0xe0_01 ,_lowerCAmelCase=4 ,_lowerCAmelCase=4 ,_lowerCAmelCase=8 ,_lowerCAmelCase=1_63_84 ,_lowerCAmelCase=1_28 ,**_lowerCAmelCase ,): super().__init__(pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = initializer_range lowerCamelCase__ = type_vocab_size lowerCamelCase__ = layer_norm_eps # Character config: lowerCamelCase__ = downsampling_rate lowerCamelCase__ = upsampling_kernel_size lowerCamelCase__ = num_hash_functions lowerCamelCase__ = num_hash_buckets lowerCamelCase__ = local_transformer_stride
9
'''simple docstring''' from __future__ import annotations import math def A__ ( __lowerCAmelCase : int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True UpperCamelCase : str = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def A__ ( __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) lowerCamelCase__ = [] for num in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = 0 while 2 * i * i <= odd_composites[num]: lowerCamelCase__ = odd_composites[num] - 2 * i * i if is_prime(__lowerCAmelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(__lowerCAmelCase ) == n: return list_nums return [] def A__ ( ): return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
9
1
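The search above looks for the smallest odd composite that cannot be written as a prime plus twice a square (Goldbach's other conjecture). A self-contained sketch of the same search (helper names are illustrative):

from itertools import count

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d = 3
    while d * d <= n:
        if n % d == 0:
            return False
        d += 2
    return True

def goldbach_counterexample() -> int:
    # First odd composite that is not prime + 2 * square.
    for n in count(9, 2):
        if is_prime(n):
            continue
        if not any(is_prime(n - 2 * i * i) for i in range(1, int((n / 2) ** 0.5) + 1)):
            return n

print(goldbach_counterexample())  # 5777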
'''simple docstring''' def A__ ( __lowerCAmelCase : float ): return 10 - x * x def A__ ( __lowerCAmelCase : float , __lowerCAmelCase : float ): # Bolzano's theorem: check whether there is a root between a and b if equation(__lowerCAmelCase ) * equation(__lowerCAmelCase ) >= 0: raise ValueError("""Wrong space!""" ) lowerCamelCase__ = a while (b - a) >= 0.01: # Find middle point lowerCamelCase__ = (a + b) / 2 # Check if middle point is root if equation(__lowerCAmelCase ) == 0.0: break # Decide the side to repeat the steps if equation(__lowerCAmelCase ) * equation(__lowerCAmelCase ) < 0: lowerCamelCase__ = c else: lowerCamelCase__ = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
9
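The same bisection idea, written as a reusable sketch over an arbitrary callable (the names and tolerance are illustrative); on f(x) = 10 - x^2 over [-2, 5] it converges to sqrt(10) ≈ 3.162:

def bisect_root(f, a: float, b: float, tol: float = 1e-2) -> float:
    # Bolzano's theorem precondition: f must change sign on [a, b].
    if f(a) * f(b) >= 0:
        raise ValueError("f(a) and f(b) must have opposite signs")
    while b - a >= tol:
        c = (a + b) / 2  # midpoint
        if f(c) == 0.0:
            return c
        if f(a) * f(c) < 0:
            b = c  # root lies in [a, c]
        else:
            a = c  # root lies in [c, b]
    return (a + b) / 2

print(bisect_root(lambda x: 10 - x * x, -2, 5))  # ~3.16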
'''simple docstring''' def A__ ( ): return [ a * b * (1000 - a - b) for a in range(1 , 999 ) for b in range(__lowerCAmelCase , 999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
9
1
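The brute force above is O(n^2); for the constraint a + b + c = 1000 one can eliminate c algebraically (from a + b + c = n and a^2 + b^2 = c^2 it follows that b = n(n - 2a) / (2(n - a))) and scan a alone. A sketch of that O(n) variant:

def pythagorean_triplet_product(total: int = 1000) -> int:
    # From a + b + c = total and a^2 + b^2 = c^2,
    # eliminating c gives b = total * (total - 2a) / (2 * (total - a)).
    for a in range(1, total // 3):
        num = total * (total - 2 * a)
        den = 2 * (total - a)
        if num % den == 0:
            b = num // den
            c = total - a - b
            return a * b * c
    raise ValueError("no triplet found")

print(pythagorean_triplet_product())  # 31875000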
'''simple docstring''' from ....utils import logging UpperCamelCase : List[str] = logging.get_logger(__name__) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=20_48 ): lowerCamelCase__ = config.__dict__ lowerCamelCase__ = modal_hidden_size if num_labels: lowerCamelCase__ = num_labels
9
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} UpperCamelCase : Dict = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } UpperCamelCase : List[Any] = { 'camembert-base': 5_12, } UpperCamelCase : List[str] = '▁' class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,) lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCAmelCase ) ) lowerCamelCase__ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3} lowerCamelCase__ = len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 
{self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] lowerCamelCase__ = """""" lowerCamelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowerCamelCase__ = True lowerCamelCase__ = [] else: current_sub_tokens.append(_lowerCAmelCase ) lowerCamelCase__ = False out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def __getstate__( self ): lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self ,_lowerCAmelCase ): lowerCamelCase__ = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase ,"""wb""" ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
9
1
'''simple docstring''' from __future__ import annotations def A__ ( __lowerCAmelCase : dict , __lowerCAmelCase : str ): lowerCamelCase__ , lowerCamelCase__ = set(__lowerCAmelCase ), [start] while stack: lowerCamelCase__ = stack.pop() explored.add(__lowerCAmelCase ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__lowerCAmelCase ) return explored UpperCamelCase : Any = { 'A': ['B', 'C', 'D'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F'], 'D': ['B', 'D'], 'E': ['B', 'F'], 'F': ['C', 'E', 'G'], 'G': ['F'], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, 'A'))
9
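A recursive counterpart to the iterative, explicit-stack DFS above, over the same adjacency-dict representation (the visit order may differ, but the explored set is the same):

def dfs_recursive(graph, node, visited=None):
    # The call stack replaces the explicit stack of the iterative version.
    if visited is None:
        visited = set()
    visited.add(node)
    for neighbour in graph[node]:
        if neighbour not in visited:
            dfs_recursive(graph, neighbour, visited)
    return visited

G = {"A": ["B", "C"], "B": ["A"], "C": ["A"]}
print(dfs_recursive(G, "A"))  # {'A', 'B', 'C'}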
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = R"""\w+[.]\d+""" lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase ) for pat in pats: lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) ) return key def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) if ( any("""norm""" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": lowerCamelCase__ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ): # Step 1: Convert pytorch tensor to numpy lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) ) lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) lowerCamelCase__ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ = rename_key(__lowerCAmelCase ) lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) ) # Correctly rename weight parameters lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) return unflatten_dict(__lowerCAmelCase )
9
1
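The regex step in the converter above rewrites numbered module keys such as 'encoder.layers.0.weight' into Flax-style 'encoder.layers_0.weight'. A standalone sketch of just that renaming:

import re

def rename_numbered_modules(key: str) -> str:
    # Replace every "name.<digits>" segment with "name_<digits>".
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key

print(rename_numbered_modules("encoder.layers.0.weight"))  # encoder.layers_0.weight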
'''simple docstring''' from math import ceil def A__ ( __lowerCAmelCase : int = 1001 ): lowerCamelCase__ = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): lowerCamelCase__ = 2 * i + 1 lowerCamelCase__ = 2 * i lowerCamelCase__ = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: UpperCamelCase : Union[str, Any] = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number')
9
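The loop above accumulates the four corners of each ring of the n x n spiral; ring i (side length 2i + 1) contributes 4(2i + 1)^2 - 12i, plus 1 for the centre. A direct sketch of that identity:

def spiral_diagonal_sum(n: int = 1001) -> int:
    # Four corners of ring i: (2i+1)^2, (2i+1)^2 - 2i, (2i+1)^2 - 4i, (2i+1)^2 - 6i.
    return 1 + sum(4 * (2 * i + 1) ** 2 - 12 * i for i in range(1, (n - 1) // 2 + 1))

print(spiral_diagonal_sum())  # 669171001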
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] 
,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """patrickvonplaten/t5-tiny-random""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
9
1
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging UpperCamelCase : Optional[Any] = logging.get_logger(__name__) UpperCamelCase : Tuple = {'vocab_file': 'spiece.model'} UpperCamelCase : Dict = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 UpperCamelCase : Union[str, Any] = { 't5-small': 5_12, 't5-base': 5_12, 't5-large': 5_12, 't5-3b': 5_12, 't5-11b': 5_12, } UpperCamelCase : Union[str, Any] = '▁' class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase=1_00 ,_lowerCAmelCase=None ,_lowerCAmelCase = None ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: lowerCamelCase__ = [F'''<extra_id_{i}>''' for i in range(_lowerCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens lowerCamelCase__ = len(set(filter(lambda _lowerCAmelCase : bool("""extra_id""" in str(_lowerCAmelCase ) ) ,_lowerCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled.
We recommend you to''' """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) lowerCamelCase__ = legacy lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,extra_ids=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,legacy=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = vocab_file lowerCamelCase__ = extra_ids lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: lowerCamelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F''' {pretrained_model_name_or_path} automatically truncating your input to''' F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" ,_lowerCAmelCase ,) return max_model_length @property def UpperCamelCase_ ( self ): return self.sp_model.get_piece_size() + self._extra_ids def UpperCamelCase_ ( self ): lowerCamelCase__ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(_lowerCAmelCase )) + [1] return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1] def UpperCamelCase_ ( self ): return list( set(filter(lambda _lowerCAmelCase : bool(re.search(R"""<extra_id_\d+>""" ,_lowerCAmelCase ) ) is not None ,self.additional_special_tokens ) ) ) def UpperCamelCase_ ( self ): return [self._convert_token_to_id(_lowerCAmelCase ) for token in self.get_sentinel_tokens()] def UpperCamelCase_ ( self ,_lowerCAmelCase ): if len(_lowerCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated''' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._add_eos_if_not_present(_lowerCAmelCase ) if token_ids_a is None: return token_ids_a else: lowerCamelCase__ = self._add_eos_if_not_present(_lowerCAmelCase ) return token_ids_a + token_ids_a def __getstate__( self ): lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self ,_lowerCAmelCase ): lowerCamelCase__ = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,**_lowerCAmelCase ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: lowerCamelCase__ = SPIECE_UNDERLINE + text.replace(_lowerCAmelCase ,""" """ ) return super().tokenize(_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,**_lowerCAmelCase ): if not self.legacy: lowerCamelCase__ = text.startswith(_lowerCAmelCase ) if is_first: lowerCamelCase__ = text[1:] lowerCamelCase__ = self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(_lowerCAmelCase ): lowerCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token.startswith("""<extra_id_""" ): lowerCamelCase__ = re.match(R"""<extra_id_(\d+)>""" ,_lowerCAmelCase ) lowerCamelCase__ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if index < self.sp_model.get_piece_size(): lowerCamelCase__ = self.sp_model.IdToPiece(_lowerCAmelCase ) else: lowerCamelCase__ = F'''<extra_id_{self.vocab_size - 1 - index}>''' return token def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] lowerCamelCase__ = """""" lowerCamelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowerCamelCase__ = True lowerCamelCase__ = [] else: current_sub_tokens.append(_lowerCAmelCase ) lowerCamelCase__ = False out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase ,"""wb""" ) as fi: lowerCamelCase__ = 
self.sp_model.serialized_model_proto() fi.write(lowerCamelCase__ ) return (out_vocab_file,)
9
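A brief sketch of how the sentinel-id mapping in the tokenizer above behaves, using the public T5Tokenizer counterpart; the exact numbers (32,000 SentencePiece pieces plus 100 extra ids, so a vocab_size of 32,100) are an assumption about the stock t5-small checkpoint:

from transformers import T5Tokenizer  # public counterpart of the obfuscated class above

tok = T5Tokenizer.from_pretrained("t5-small")
# Sentinel tokens occupy the top of the vocabulary, highest id first:
assert tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1    # 32099
assert tok.convert_tokens_to_ids("<extra_id_99>") == tok.vocab_size - 100
# Building model inputs appends exactly one EOS token:
ids = tok("Studies have shown that owning a dog is good for you").input_ids
assert ids[-1] == tok.eos_token_id  # </s> is id 1 for T5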
'''simple docstring''' from math import factorial UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def A__ ( __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Convert the number to a string to iterate over its digits and sum their factorials. return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length lowerCamelCase__ = 0 # the cached sizes of the previous chains lowerCamelCase__ = {} for start_chain_element in range(1 , __lowerCAmelCase ): # The temporary set will contain the elements of the chain lowerCamelCase__ = set() lowerCamelCase__ = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater than the desired one. lowerCamelCase__ = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(__lowerCAmelCase ) chain_set_length += 1 lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] lowerCamelCase__ = chain_set_length # If the chain contains the exact number of elements, increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution()}')
9
1
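A quick usage sketch for the digit-factorial chain solver above. The function names follow the ones the module's own __main__ block refers to, and 402 is the commonly cited Project Euler 74 result, quoted as an expectation rather than a verified output of this dump:

# Doctest-style checks for the helpers above.
assert digit_factorial_sum(145) == 145       # 1! + 4! + 5! = 145, a fixed point
assert digit_factorial_sum(169) == 363601    # 169 -> 363601 -> 1454 -> 169, a 3-cycle
print(solution(chain_length=60, number_limit=1_000_000))  # should print 402 (PE 74)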
'''simple docstring''' import json import os import re import sys import urllib.request import requests from bs4 import BeautifulSoup UpperCamelCase : List[str] = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582' } def A__ ( __lowerCAmelCase : str = "dhaka" , __lowerCAmelCase : int = 5 ): lowerCamelCase__ = min(__lowerCAmelCase , 50 ) # Prevent abuse! lowerCamelCase__ = { """q""": query, """tbm""": """isch""", """hl""": """en""", """ijn""": """0""", } lowerCamelCase__ = requests.get("""https://www.google.com/search""" , params=__lowerCAmelCase , headers=__lowerCAmelCase ) lowerCamelCase__ = BeautifulSoup(html.text , """html.parser""" ) lowerCamelCase__ = """""".join( re.findall(R"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) ) lowerCamelCase__ = json.dumps(__lowerCAmelCase ) lowerCamelCase__ = json.loads(__lowerCAmelCase ) lowerCamelCase__ = re.findall( R"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , __lowerCAmelCase , ) if not matched_google_image_data: return 0 lowerCamelCase__ = re.sub( R"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(__lowerCAmelCase ) , ) lowerCamelCase__ = re.findall( R"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , __lowerCAmelCase , ) for index, fixed_full_res_image in enumerate(__lowerCAmelCase ): if index >= max_images: return index lowerCamelCase__ = bytes(__lowerCAmelCase , """ascii""" ).decode( """unicode-escape""" ) lowerCamelCase__ = bytes(__lowerCAmelCase , """ascii""" ).decode( """unicode-escape""" ) lowerCamelCase__ = urllib.request.build_opener() lowerCamelCase__ = [ ( """User-Agent""", """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36""" """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""", ) ] urllib.request.install_opener(__lowerCAmelCase ) lowerCamelCase__ = F'''query_{query.replace(" " , "_" )}''' if not os.path.exists(__lowerCAmelCase ): os.makedirs(__lowerCAmelCase ) urllib.request.urlretrieve( # noqa: S310 __lowerCAmelCase , F'''{path_name}/original_size_img_{index}.jpg''' ) return index if __name__ == "__main__": try: UpperCamelCase : Optional[Any] = download_images_from_google_query(sys.argv[1]) print(F'{image_count} images were downloaded to disk.') except IndexError: print('Please provide a search term.') raise
9
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCamelCase : Optional[Any] = 'src/diffusers' # Matches is_xxx_available() UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') UpperCamelCase : Optional[int] = '\n{0} = None\n' UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None return "_and_".join(__lowerCAmelCase ) def A__ ( ): with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase__ = 0 lowerCamelCase__ = {} # Go through the end of the file while line_index < len(__lowerCAmelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase__ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1: lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = objects else: line_index += 1 return backend_specific_objects def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCAmelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase ) else: return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[int]=None ): if backend_specific_objects is None: lowerCamelCase__ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase__ = {} for backend, objects in backend_specific_objects.items(): lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] ) lowerCamelCase__ = dummy_file return dummy_files def A__ ( __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase__ = {"""torch""": """pt"""} # 
Locate actual dummy modules and read their content. lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" ) lowerCamelCase__ = { backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase__ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase : Any = parser.parse_args() check_dummies(args.fix_and_overwrite)
9
1
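For illustration, the template helpers above can be exercised directly; a minimal sketch (the object names are hypothetical, chosen only to hit each branch of create_dummy_object):

# An all-caps name becomes a None constant, a lowercase name a guarded function,
# and anything mixed-case a DummyObject class.
print(create_dummy_object("KARRAS_VE_CONSTANT", '["torch"]'))  # -> KARRAS_VE_CONSTANT = None
print(create_dummy_object("some_helper", '["torch"]'))         # -> guarded function stub
print(create_dummy_object("SomeScheduler", '["torch"]'))       # -> DummyObject class stub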
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Dict = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : int = [ 'MRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MraForMaskedLM', 'MraForMultipleChoice', 'MraForQuestionAnswering', 'MraForSequenceClassification', 'MraForTokenClassification', 'MraLayer', 'MraModel', 'MraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
9
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = GPTSwaTokenizer _UpperCamelCase = False _UpperCamelCase = True _UpperCamelCase = False def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """This is a test""" lowerCamelCase__ = """This is a test""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = """<s>""" lowerCamelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""j""" ) self.assertEqual(len(_lowerCAmelCase ) ,20_00 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,20_00 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,) # fmt: on lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""] lowerCamelCase__ = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) # Test that decode_fast returns the input text for 
text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
9
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase : Optional[Any] = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: 
import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
'''simple docstring''' from manim import * class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 ) lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""CPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(1 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""GPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase ) gpu.set_x(gpu.get_x() - 1 ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""Model""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.play( Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,) lowerCamelCase__ = MarkupText( F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,) lowerCamelCase__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase__ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] for i, rect in enumerate(_lowerCAmelCase ): lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 ) cpu_target.move_to(_lowerCAmelCase ) cpu_target.generate_target() lowerCamelCase__ = 0.46 / 4 lowerCamelCase__ = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 ) cpu_targs.append(_lowerCAmelCase ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) ) second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) ) self.play(*_lowerCAmelCase ) self.play(*_lowerCAmelCase ) self.wait()
9
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase : str = { 'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTBigCodeForSequenceClassification', 'GPTBigCodeForTokenClassification', 'GPTBigCodeForCausalLM', 'GPTBigCodeModel', 'GPTBigCodePreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys UpperCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
'''simple docstring''' UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = 0 while number: # Speed is increased slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # Two chains are formed: # one ends with 89, and declaring its member 58 first means the fewest # iterations are needed to check all the remaining members; # the other ends with 1 and contains only the single element 1. # So 58 and 1 are declared at the start. # The dictionary was changed to an array to speed up the solution UpperCamelCase : list[bool | None] = [None] * 10_00_00_00 UpperCamelCase : Tuple = True UpperCamelCase : Optional[int] = False def A__ ( __lowerCAmelCase : int ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) ) lowerCamelCase__ = number_chain while number < 1000_0000: lowerCamelCase__ = number_chain number *= 10 return number_chain def A__ ( __lowerCAmelCase : int = 1000_0000 ): for i in range(1 , __lowerCAmelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution() = }')
9
1
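A small sanity sketch for the squared-digit chain above, again using the names the module's own __main__ refers to; 8581146 is the commonly cited Project Euler 92 result and is stated here as an expectation, not a verified output of this dump:

assert next_number(44) == 32   # 4^2 + 4^2
assert next_number(85) == 89   # 8^2 + 5^2 = 64 + 25
print(solution())              # should print 8581146 (PE 92)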
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : List[Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'audio-spectrogram-transformer' def __init__( self ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=16 ,_lowerCAmelCase=True ,_lowerCAmelCase=10 ,_lowerCAmelCase=10 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=1_28 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = patch_size lowerCamelCase__ = qkv_bias lowerCamelCase__ = frequency_stride lowerCamelCase__ = time_stride lowerCamelCase__ = max_length lowerCamelCase__ = num_mel_bins
9
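The stride and spectrogram settings above fix the patch grid the AST encoder sees; a short arithmetic sketch of that derivation (the overlap formula mirrors the one used in the AST modeling code, stated here as an assumption):

# Defaults above: 128 mel bins, max_length of 1024 frames, 16x16 patches, stride 10.
freq_patches = (128 - 16) // 10 + 1   # 12
time_patches = (1024 - 16) // 10 + 1  # 101
assert freq_patches * time_patches == 1212  # patch sequence length before special tokens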
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : List[str] = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'donut-swin' _UpperCamelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[3, 6, 12, 24] ,_lowerCAmelCase=7 ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = embed_dim lowerCamelCase__ = depths lowerCamelCase__ = len(_lowerCAmelCase ) lowerCamelCase__ = num_heads lowerCamelCase__ = window_size lowerCamelCase__ = mlp_ratio lowerCamelCase__ = qkv_bias lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = drop_path_rate lowerCamelCase__ = hidden_act lowerCamelCase__ = use_absolute_embeddings lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCamelCase__ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
9
1
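A one-line check of the derived channel dimension computed at the end of the constructor above, using the public DonutSwinConfig with its defaults (embed_dim 96, four stages, matching the signature shown):

from transformers import DonutSwinConfig  # public counterpart of the class above

cfg = DonutSwinConfig()  # embed_dim=96, depths=[2, 2, 6, 2]
assert cfg.hidden_size == 96 * 2 ** (len(cfg.depths) - 1)  # 96 * 8 = 768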
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCamelCase : Dict = { 'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[Any] = [ 'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoForCausalLM', 'GPTNeoForQuestionAnswering', 'GPTNeoForSequenceClassification', 'GPTNeoForTokenClassification', 'GPTNeoModel', 'GPTNeoPreTrainedModel', 'load_tf_weights_in_gpt_neo', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = [ 'FlaxGPTNeoForCausalLM', 'FlaxGPTNeoModel', 'FlaxGPTNeoPreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
'''simple docstring''' import argparse import os import torch from transformers.utils import WEIGHTS_NAME UpperCamelCase : Optional[Any] = ['small', 'medium', 'large'] UpperCamelCase : Dict = 'lm_head.decoder.weight' UpperCamelCase : int = 'lm_head.weight' def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ): lowerCamelCase__ = torch.load(__lowerCAmelCase ) lowerCamelCase__ = d.pop(__lowerCAmelCase ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) UpperCamelCase : Dict = parser.parse_args() for MODEL in DIALOGPT_MODELS: UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl') UpperCamelCase : str = F'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
9
1
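The intent of the converter above is a single key rename inside each checkpoint; a self-contained sketch of the equivalent operation, assuming the .pkl files are plain PyTorch state dicts (as in Microsoft's DialoGPT release):

import os

import torch

def rename_lm_head(checkpoint_path: str, dump_folder: str) -> None:
    # Move the tied decoder weight to the key transformers expects.
    state_dict = torch.load(checkpoint_path)
    state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
    os.makedirs(dump_folder, exist_ok=True)
    torch.save(state_dict, os.path.join(dump_folder, "pytorch_model.bin"))  # WEIGHTS_NAME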
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = params lowerCamelCase__ = np.array(_lowerCAmelCase ) lowerCamelCase__ = np.array([len(_lowerCAmelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self ,_lowerCAmelCase ): return (self.token_ids[index], self.lengths[index]) def __len__( self ): return len(self.lengths ) def UpperCamelCase_ ( self ): assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.params.max_model_input_size lowerCamelCase__ = self.lengths > max_len logger.info(F'''Splitting {sum(_lowerCAmelCase )} too long sequences.''' ) def divide_chunks(_lowerCAmelCase ,_lowerCAmelCase ): return [l[i : i + n] for i in range(0 ,len(_lowerCAmelCase ) ,_lowerCAmelCase )] lowerCamelCase__ = [] lowerCamelCase__ = [] if self.params.mlm: lowerCamelCase__ , lowerCamelCase__ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: lowerCamelCase__ , lowerCamelCase__ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids ,self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: lowerCamelCase__ = [] for sub_s in divide_chunks(seq_ ,max_len - 2 ): if sub_s[0] != cls_id: lowerCamelCase__ = np.insert(_lowerCAmelCase ,0 ,_lowerCAmelCase ) if sub_s[-1] != sep_id: lowerCamelCase__ = np.insert(_lowerCAmelCase ,len(_lowerCAmelCase ) ,_lowerCAmelCase ) assert len(_lowerCAmelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(_lowerCAmelCase ) new_tok_ids.extend(_lowerCAmelCase ) new_lengths.extend([len(_lowerCAmelCase ) for l in sub_seqs] ) lowerCamelCase__ = np.array(_lowerCAmelCase ) lowerCamelCase__ = np.array(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = len(self ) lowerCamelCase__ = self.lengths > 11 lowerCamelCase__ = self.token_ids[indices] lowerCamelCase__ = self.lengths[indices] lowerCamelCase__ = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def UpperCamelCase_ ( self ): if "unk_token" not in self.params.special_tok_ids: return else: lowerCamelCase__ = self.params.special_tok_ids["""unk_token"""] lowerCamelCase__ = len(self ) lowerCamelCase__ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) lowerCamelCase__ = (unk_occs / self.lengths) < 0.5 lowerCamelCase__ = self.token_ids[indices] lowerCamelCase__ = self.lengths[indices] lowerCamelCase__ = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def UpperCamelCase_ ( self ): if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens 
(covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [t[0] for t in batch] lowerCamelCase__ = [t[1] for t in batch] assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) # Max for paddings lowerCamelCase__ = max(_lowerCAmelCase ) # Pad token ids if self.params.mlm: lowerCamelCase__ = self.params.special_tok_ids["""pad_token"""] else: lowerCamelCase__ = self.params.special_tok_ids["""unk_token"""] lowerCamelCase__ = [list(t.astype(_lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(_lowerCAmelCase )) for t in token_ids] assert len(tk_ ) == len(_lowerCAmelCase ) assert all(len(_lowerCAmelCase ) == max_seq_len_ for t in tk_ ) lowerCamelCase__ = torch.tensor(tk_ ) # (bs, max_seq_len_) lowerCamelCase__ = torch.tensor(_lowerCAmelCase ) # (bs) return tk_t, lg_t
9
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = mask_ratio lowerCamelCase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase__ = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) lowerCamelCase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 
model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = outputs_dict[0].numpy() lowerCamelCase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCAmelCase ): lowerCamelCase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCAmelCase ): lowerCamelCase__ = v.numpy() else: lowerCamelCase__ = np.array(_lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.constant(_lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ = tf_noise super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase ) } lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase__ = main_layer_class(_lowerCAmelCase ) lowerCamelCase__ = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) ) lowerCamelCase__ = model(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" ) model.save(_lowerCAmelCase ) lowerCamelCase__ = tf.keras.models.load_model( _lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCAmelCase ,tf.keras.Model ) lowerCamelCase__ = model(_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = outputs.last_hidden_state.numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = outputs.logits.numpy() lowerCamelCase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase ) lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = after_outputs["""logits"""].numpy() lowerCamelCase__ = 0 lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase ,1E-5 ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCAmelCase ) lowerCamelCase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase__ = 
model_class.from_config(model.config ) lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase_ ( self ): pass @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ = ViTMAEConfig() lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
9
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCamelCase : List[str] = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = b.T lowerCamelCase__ = np.sum(np.square(__lowerCAmelCase ) , axis=1 ) lowerCamelCase__ = np.sum(np.square(__lowerCAmelCase ) , axis=0 ) lowerCamelCase__ = np.matmul(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = aa[:, None] - 2 * ab + ba[None, :] return d def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str ): lowerCamelCase__ = x.reshape(-1 , 3 ) lowerCamelCase__ = squared_euclidean_distance(__lowerCAmelCase , __lowerCAmelCase ) return np.argmin(__lowerCAmelCase , axis=1 ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = ['pixel_values'] def __init__( self ,_lowerCAmelCase = None ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = PILImageResampling.BILINEAR ,_lowerCAmelCase = True ,_lowerCAmelCase = True ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = size if size is not None else {"""height""": 2_56, """width""": 2_56} lowerCamelCase__ = get_size_dict(_lowerCAmelCase ) lowerCamelCase__ = np.array(_lowerCAmelCase ) if clusters is not None else None lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = resample lowerCamelCase__ = do_normalize lowerCamelCase__ = do_color_quantize def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = PILImageResampling.BILINEAR ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): lowerCamelCase__ = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' ) return resize( _lowerCAmelCase ,size=(size["""height"""], size["""width"""]) ,resample=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,): lowerCamelCase__ = rescale(image=_lowerCAmelCase ,scale=1 / 127.5 ,data_format=_lowerCAmelCase ) lowerCamelCase__ = image - 1 return image def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = ChannelDimension.FIRST ,**_lowerCAmelCase ,): lowerCamelCase__ = do_resize if do_resize is not None else self.do_resize lowerCamelCase__ = size if size is not None else self.size lowerCamelCase__ = get_size_dict(_lowerCAmelCase ) lowerCamelCase__ = resample if resample is not None else self.resample lowerCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize lowerCamelCase__ = clusters if clusters is not None else self.clusters lowerCamelCase__ = np.array(_lowerCAmelCase ) lowerCamelCase__ = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_color_quantize and clusters is None: raise ValueError("""Clusters must be specified if do_color_quantize is True.""" ) # All transformations expect numpy arrays. lowerCamelCase__ = [to_numpy_array(_lowerCAmelCase ) for image in images] if do_resize: lowerCamelCase__ = [self.resize(image=_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ) for image in images] if do_normalize: lowerCamelCase__ = [self.normalize(image=_lowerCAmelCase ) for image in images] if do_color_quantize: lowerCamelCase__ = [to_channel_dimension_format(_lowerCAmelCase ,ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) lowerCamelCase__ = np.array(_lowerCAmelCase ) lowerCamelCase__ = color_quantize(_lowerCAmelCase ,_lowerCAmelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) lowerCamelCase__ = images.shape[0] lowerCamelCase__ = images.reshape(_lowerCAmelCase ,-1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. lowerCamelCase__ = list(_lowerCAmelCase ) else: lowerCamelCase__ = [to_channel_dimension_format(_lowerCAmelCase ,_lowerCAmelCase ) for image in images] lowerCamelCase__ = {"""input_ids""": images} return BatchFeature(data=_lowerCAmelCase ,tensor_type=_lowerCAmelCase )
9
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,): lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18} lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = do_center_crop lowerCamelCase__ = crop_size lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean lowerCamelCase__ = image_std def UpperCamelCase_ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = LevitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): lowerCamelCase__ = LevitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
9
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} UpperCamelCase : Dict = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } UpperCamelCase : List[Any] = { 'camembert-base': 5_12, } UpperCamelCase : List[str] = '▁' class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,) lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCAmelCase ) ) lowerCamelCase__ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3} lowerCamelCase__ = len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 
{self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] lowerCamelCase__ = """""" lowerCamelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowerCamelCase__ = True lowerCamelCase__ = [] else: current_sub_tokens.append(_lowerCAmelCase ) lowerCamelCase__ = False out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def __getstate__( self ): lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self ,_lowerCAmelCase ): lowerCamelCase__ = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase ,"""wb""" ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
9
'''simple docstring''' import numpy # List of input, output pairs UpperCamelCase : List[Any] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) UpperCamelCase : int = [2, 4, 1, 5] UpperCamelCase : int = len(train_data) UpperCamelCase : Dict = 0.009 def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ): return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output( __lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = 0 for i in range(len(__lowerCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ): lowerCamelCase__ = 0 for i in range(__lowerCAmelCase ): if index == -1: summation_value += _error(__lowerCAmelCase ) else: summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index] return summation_value def A__ ( __lowerCAmelCase : List[Any] ): lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m return cost_derivative_value def A__ ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCamelCase__ = 0.00_0002 lowerCamelCase__ = 0 lowerCamelCase__ = 0 while True: j += 1 lowerCamelCase__ = [0, 0, 0, 0] for i in range(0 , len(__lowerCAmelCase ) ): lowerCamelCase__ = get_cost_derivative(i - 1 ) lowerCamelCase__ = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ): break lowerCamelCase__ = temp_parameter_vector print(("""Number of iterations:""", j) ) def A__ ( ): for i in range(len(__lowerCAmelCase ) ): print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
9
1
'''simple docstring''' import argparse from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird from transformers.utils import logging logging.set_verbosity_info() def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ): # Initialise PyTorch model lowerCamelCase__ = BigBirdConfig.from_json_file(__lowerCAmelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) if is_trivia_qa: lowerCamelCase__ = BigBirdForQuestionAnswering(__lowerCAmelCase ) else: lowerCamelCase__ = BigBirdForPreTraining(__lowerCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_big_bird(__lowerCAmelCase , __lowerCAmelCase , is_trivia_qa=__lowerCAmelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--big_bird_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.' ) UpperCamelCase : Optional[int] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa )
9
'''simple docstring''' import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase ) lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""] lowerCamelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCamelCase__ = {} lowerCamelCase__ = """first_stage_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCamelCase__ = {} lowerCamelCase__ = """model.diffusion_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] lowerCamelCase__ = config.model.params.first_stage_config.params lowerCamelCase__ = config.model.params.unet_config.params lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval() vqvae.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval() unet.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , ) lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) pipeline.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) UpperCamelCase : List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
9
1
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Dict = { 'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': ( 'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'trajectory_transformer' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = { 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=1_00 ,_lowerCAmelCase=5 ,_lowerCAmelCase=1 ,_lowerCAmelCase=1 ,_lowerCAmelCase=2_49 ,_lowerCAmelCase=6 ,_lowerCAmelCase=17 ,_lowerCAmelCase=25 ,_lowerCAmelCase=4 ,_lowerCAmelCase=4 ,_lowerCAmelCase=1_28 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.0006 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=1 ,_lowerCAmelCase=True ,_lowerCAmelCase=1 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = action_weight lowerCamelCase__ = reward_weight lowerCamelCase__ = value_weight lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = block_size lowerCamelCase__ = action_dim lowerCamelCase__ = observation_dim lowerCamelCase__ = transition_dim lowerCamelCase__ = learning_rate lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_embd lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = resid_pdrop lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = kaiming_initializer_range lowerCamelCase__ = use_cache super().__init__(pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
9
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ): lowerCamelCase__ = "" lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ): lowerCamelCase__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__lowerCAmelCase ) return decoded def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = [] for key in product(__lowerCAmelCase , repeat=3 ): lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase ) if encoded is not None: possibles.append(__lowerCAmelCase ) return possibles def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ): return [possible for possible in possibles if common_word in possible.lower()] def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" ) lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )] lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase ) for common_word in COMMON_WORDS: lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) == 1: break lowerCamelCase__ = possibles[0] return sum(ord(__lowerCAmelCase ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
9
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor UpperCamelCase : Dict = logging.get_logger(__name__) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): warnings.warn( """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use LayoutLMv2ImageProcessor instead.""" ,_lowerCAmelCase ,) super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase )
9
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = data # Initialize hash values lowerCamelCase__ = [ 0x6a_09_e6_67, 0xbb_67_ae_85, 0x3c_6e_f3_72, 0xa5_4f_f5_3a, 0x51_0e_52_7f, 0x9b_05_68_8c, 0x1f_83_d9_ab, 0x5b_e0_cd_19, ] # Initialize round constants lowerCamelCase__ = [ 0x42_8a_2f_98, 0x71_37_44_91, 0xb5_c0_fb_cf, 0xe9_b5_db_a5, 0x39_56_c2_5b, 0x59_f1_11_f1, 0x92_3f_82_a4, 0xab_1c_5e_d5, 0xd8_07_aa_98, 0x12_83_5b_01, 0x24_31_85_be, 0x55_0c_7d_c3, 0x72_be_5d_74, 0x80_de_b1_fe, 0x9b_dc_06_a7, 0xc1_9b_f1_74, 0xe4_9b_69_c1, 0xef_be_47_86, 0x0f_c1_9d_c6, 0x24_0c_a1_cc, 0x2d_e9_2c_6f, 0x4a_74_84_aa, 0x5c_b0_a9_dc, 0x76_f9_88_da, 0x98_3e_51_52, 0xa8_31_c6_6d, 0xb0_03_27_c8, 0xbf_59_7f_c7, 0xc6_e0_0b_f3, 0xd5_a7_91_47, 0x06_ca_63_51, 0x14_29_29_67, 0x27_b7_0a_85, 0x2e_1b_21_38, 0x4d_2c_6d_fc, 0x53_38_0d_13, 0x65_0a_73_54, 0x76_6a_0a_bb, 0x81_c2_c9_2e, 0x92_72_2c_85, 0xa2_bf_e8_a1, 0xa8_1a_66_4b, 0xc2_4b_8b_70, 0xc7_6c_51_a3, 0xd1_92_e8_19, 0xd6_99_06_24, 0xf4_0e_35_85, 0x10_6a_a0_70, 0x19_a4_c1_16, 0x1e_37_6c_08, 0x27_48_77_4c, 0x34_b0_bc_b5, 0x39_1c_0c_b3, 0x4e_d8_aa_4a, 0x5b_9c_ca_4f, 0x68_2e_6f_f3, 0x74_8f_82_ee, 0x78_a5_63_6f, 0x84_c8_78_14, 0x8c_c7_02_08, 0x90_be_ff_fa, 0xa4_50_6c_eb, 0xbe_f9_a3_f7, 0xc6_71_78_f2, ] lowerCamelCase__ = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64)) lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) ) return data + padding + big_endian_integer def UpperCamelCase_ ( self ): # Convert into blocks of 64 bytes lowerCamelCase__ = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array lowerCamelCase__ = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) lowerCamelCase__ = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) lowerCamelCase__ = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 ) lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g) lowerCamelCase__ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 ) lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c) lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) lowerCamelCase__ = [a, b, c, d, e, f, g, h] # Modify final values lowerCamelCase__ = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes ) ] lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): import hashlib lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() ) def A__ ( ): import doctest doctest.testmod() lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" ) print(SHAaaa(__lowerCAmelCase ).hash ) if __name__ == "__main__": main()
9
1
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) lowerCamelCase__ = emb.weight.data return lin_layer def A__ ( __lowerCAmelCase : Dict ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] lowerCamelCase__ = mam_aaa["""model"""] remove_ignore_keys_(__lowerCAmelCase ) lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0] lowerCamelCase__ = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""] lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) lowerCamelCase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase : Tuple = parser.parse_args() UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
9
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) lowerCamelCase__ = emb.weight.data return lin_layer def A__ ( __lowerCAmelCase : Dict ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] lowerCamelCase__ = mam_aaa["""model"""] remove_ignore_keys_(__lowerCAmelCase ) lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0] lowerCamelCase__ = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""] lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) lowerCamelCase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase : Tuple = parser.parse_args() UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
9
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Any=False , __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = """backbone.""" if is_semantic else """""" lowerCamelCase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', """beit.embeddings.cls_token"""), (F'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""), (F'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""), (F'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("""mask_token""", """beit.embeddings.mask_token"""), ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) else: # layernorm + classification head rename_keys.extend( [ ("""fc_norm.weight""", """beit.pooler.layernorm.weight"""), ("""fc_norm.bias""", """beit.pooler.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[int]=False ): for i in range(config.num_hidden_layers ): lowerCamelCase__ = """backbone.""" if is_semantic else """""" # queries, keys and values lowerCamelCase__ = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowerCamelCase__ = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowerCamelCase__ = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase__ = q_bias lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = 
in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowerCamelCase__ = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowerCamelCase__ = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowerCamelCase__ = gamma_a lowerCamelCase__ = gamma_a def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ): lowerCamelCase__ = dct.pop(__lowerCAmelCase ) lowerCamelCase__ = val def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = False if """rvlcdip""" in checkpoint_url else True lowerCamelCase__ = BeitConfig(use_absolute_position_embeddings=__lowerCAmelCase , use_mask_token=__lowerCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 # labels if "rvlcdip" in checkpoint_url: lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """rvlcdip-id2label.json""" lowerCamelCase__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowerCamelCase__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" )["""model"""] lowerCamelCase__ = create_rename_keys(__lowerCAmelCase , has_lm_head=__lowerCAmelCase ) for src, dest in rename_keys: rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , has_lm_head=__lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = BeitForMaskedImageModeling(__lowerCAmelCase ) if has_lm_head else BeitForImageClassification(__lowerCAmelCase ) model.eval() model.load_state_dict(__lowerCAmelCase ) # Check outputs on an image lowerCamelCase__ = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ) lowerCamelCase__ = encoding["""pixel_values"""] lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = outputs.logits # verify logits lowerCamelCase__ = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(__lowerCAmelCase ), "Shape of logits not as expected" Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: if has_lm_head: lowerCamelCase__ = """dit-base""" if """base""" in checkpoint_url else """dit-large""" else: lowerCamelCase__ = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip""" image_processor.push_to_hub( repo_path_or_name=Path(__lowerCAmelCase , __lowerCAmelCase ) , 
organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=__lowerCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(__lowerCAmelCase , __lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=__lowerCAmelCase , ) if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth', type=str, help='URL to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', ) UpperCamelCase : Optional[int] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
9
'''simple docstring'''
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Creates a list of PIL images (channels-first arrays moved to channels-last).
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ["pixel_values", "input_ids", "attention_mask"]
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
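Outside of unittest, the behavior this suite pins down reduces to a short round trip. A minimal sketch, reusing the same tiny fixture checkpoint as setUp; the expected key order comes from the assertions above.

import numpy as np
from PIL import Image
from transformers import BertTokenizer, BlipImageProcessor, BlipProcessor

tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
processor = BlipProcessor(BlipImageProcessor(), tokenizer)
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']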
9
1
'''simple docstring'''
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}

COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    # XOR the ciphertext with the cycled key; bail out on the first invalid character.
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
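A quick self-contained check of the decoding primitive: encrypt a made-up plaintext with a three-letter lowercase key and confirm that try_key, as defined above, inverts it.

from itertools import cycle

plaintext = "the quick brown fox"
key = (ord("a"), ord("b"), ord("c"))
ciphertext = [ord(char) ^ keychar for char, keychar in zip(plaintext, cycle(key))]
assert try_key(ciphertext, key) == plaintext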
9
'''simple docstring'''
import json
import os

import torch

from diffusers import UNet1DModel

os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()

    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    # rename fairseq-style keys to the diffusers names, relying on identical ordering
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
9
1
'''simple docstring'''
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor


class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
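The slow integration test above reduces to a small inference recipe against the published checkpoint; the image path below is a placeholder.

import torch
from PIL import Image
from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

image_processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
image = Image.open("cats.png")  # any RGB image
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])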
9
'''simple docstring'''
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Unigram tokenizer with the normalization and pre-tokenization used by SentencePiece."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # The Unigram model's unk_id cannot be set through the trainer, so patch the serialized JSON.
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
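A minimal training sketch for the class above; the two-sentence corpus and the tiny vocab size are toy values (shrink vocab_size further if the trainer reports too few seed pieces).

tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(["hello world", "goodbye cruel world"], vocab_size=60, show_progress=False)
print(tokenizer.encode("hello world").tokens)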
9
1
'''simple docstring'''
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        # conv1_get: [kernel_size, kernel_number, conv_step]; size_p1: pooling size;
        # bp_num1/2/3: units of the input, hidden and output BP layers.
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def ReadModel(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map])) - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )

                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ', data_teach)
                # print('   ----BP_output  ', bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
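A toy end-to-end run of the class above. The shapes are picked so the dimensions line up: a 16x16 input with two 3x3 kernels (step 1) gives 14x14 feature maps, 7x7 after 2x2 pooling, hence 2 * 7 * 7 = 98 inputs to the fully connected part; the remaining hyperparameters are arbitrary.

import numpy as np

cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=98, bp_num2=10, bp_num3=2)
train_x = [np.random.rand(16, 16) for _ in range(4)]
train_y = [np.random.rand(2) for _ in range(4)]
cnn.train(patterns=4, datas_train=train_x, datas_teach=train_y, n_repeat=2, error_accuracy=0.1, draw_e=False)
print(cnn.predict(train_x))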
9
'''simple docstring'''
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks if a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns the first n odd composite numbers which do not follow the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
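For reference, the search above terminates almost immediately; calling it directly gives the known Project Euler answer as a sanity check.

assert compute_nums(1) == [5777]  # smallest odd composite with no prime + 2*i*i decomposition
print(solution())  # 5777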
9
1
'''simple docstring'''
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
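After the script finishes, the dump folder is an ordinary transformers checkpoint. A sketch of reloading a fine-tuned conversion; the folder name here is hypothetical.

from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("./wav2vec2-converted")
model = Wav2Vec2ForCTC.from_pretrained("./wav2vec2-converted")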
9
'''simple docstring'''
def solution() -> int:
    """
    Product a * b * c of the unique Pythagorean triplet with a + b + c == 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
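The comprehension simply scans for the unique triplet; a direct arithmetic check of that triplet, (200, 375, 425), confirms the result.

a, b, c = 200, 375, 425
assert a + b + c == 1000 and a * a + b * b == c * c
print(a * b * c)  # 31875000, the value returned by solution()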
9
1
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch

import multiprocess
import numpy as np
import pytest

from datasets.utils.py_utils import (
    NestedDataStructure,
    asdict,
    iflatmap_unordered,
    map_nested,
    temp_seed,
    temporary_assignment,
    zip_dict,
)

from .utils import require_tf, require_torch


def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")


@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc


class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)


@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data


@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
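As a standalone illustration of the main helper these tests target, map_nested applies a function through arbitrarily nested containers:

from datasets.utils.py_utils import map_nested

print(map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}))  # {'a': [2, 3], 'b': {'c': 4}}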
9
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
9
1
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class UpperCamelCase__ (a ):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def UpperCamelCase_ ( _lowerCAmelCase ):
        raise NotImplementedError()

    @abstractmethod
    def UpperCamelCase_ ( self ):
        raise NotImplementedError()
9
'''simple docstring'''
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)


def A__ ( __lowerCAmelCase : int ):
    lowerCamelCase__ = R"""\w+[.]\d+"""
    lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase )
    for pat in pats:
        lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) )
    return key


def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ):
    lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)

    if (
        any("""norm""" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight":
        lowerCamelCase__ = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ):
    # Step 1: Convert pytorch tensor to numpy
    lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) )

    lowerCamelCase__ = flatten_dict(__lowerCAmelCase )
    lowerCamelCase__ = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        lowerCamelCase__ = rename_key(__lowerCAmelCase )
        lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) )

        # Correctly rename weight parameters
        lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )

        # also add unexpected weight so that warning is thrown
        lowerCamelCase__ = jnp.asarray(__lowerCAmelCase )

    return unflatten_dict(__lowerCAmelCase )
9
1
'''simple docstring'''
import os
import string
import sys


UpperCamelCase : Optional[int] = 1 << 8
UpperCamelCase : Any = {
    'tab': ord('\t'),
    'newline': ord('\r'),
    'esc': 27,
    'up': 65 + ARROW_KEY_FLAG,
    'down': 66 + ARROW_KEY_FLAG,
    'right': 67 + ARROW_KEY_FLAG,
    'left': 68 + ARROW_KEY_FLAG,
    'mod_int': 91,
    'undefined': sys.maxsize,
    'interrupt': 3,
    'insert': 50,
    'delete': 51,
    'pg_up': 53,
    'pg_down': 54,
}

UpperCamelCase : Any = KEYMAP['up']
UpperCamelCase : int = KEYMAP['left']

if sys.platform == "win32":
    UpperCamelCase : Union[str, Any] = []
    UpperCamelCase : Any = {
        B'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
        B'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
        B'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
        B'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
        B'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
        B'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
        B'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
        B'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
    }

for i in range(10):
    UpperCamelCase : Tuple = ord(str(i))


def A__ ( ):
    if os.name == "nt":
        import msvcrt

        lowerCamelCase__ = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(__lowerCAmelCase ) == 0:
            # Read the keystroke
            lowerCamelCase__ = msvcrt.getch()

            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                lowerCamelCase__ = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    lowerCamelCase__ = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(__lowerCAmelCase )
                    if ord(__lowerCAmelCase ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    lowerCamelCase__ = chr(KEYMAP["""esc"""] )
                except KeyError:
                    lowerCamelCase__ = cha[1]
            else:
                lowerCamelCase__ = ch.decode(__lowerCAmelCase )
        else:
            lowerCamelCase__ = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        lowerCamelCase__ = sys.stdin.fileno()
        lowerCamelCase__ = termios.tcgetattr(__lowerCAmelCase )
        try:
            tty.setraw(__lowerCAmelCase )
            lowerCamelCase__ = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(__lowerCAmelCase , termios.TCSADRAIN , __lowerCAmelCase )
    return ch


def A__ ( ):
    lowerCamelCase__ = get_raw_chars()
    if ord(__lowerCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(__lowerCAmelCase ) == KEYMAP["esc"]:
        lowerCamelCase__ = get_raw_chars()
        if ord(__lowerCAmelCase ) == KEYMAP["mod_int"]:
            lowerCamelCase__ = get_raw_chars()
            if ord(__lowerCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__lowerCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(__lowerCAmelCase ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
9
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ):
                lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """sshleifer/tiny-gpt2"""
        lowerCamelCase__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
        lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
        lowerCamelCase__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """sgugger/tiny-distilbert-classification"""
        lowerCamelCase__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,)
        lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
        lowerCamelCase__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """sshleifer/tiny-gpt2"""
        lowerCamelCase__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
        lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
        lowerCamelCase__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """sshleifer/tiny-gpt2"""
        lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
        lowerCamelCase__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
        lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
        lowerCamelCase__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """sshleifer/tiny-gpt2"""
        lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
        lowerCamelCase__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
        lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
        lowerCamelCase__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """sshleifer/tiny-gpt2"""
        lowerCamelCase__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
        lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
        lowerCamelCase__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """sshleifer/tiny-gpt2"""
        lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
        lowerCamelCase__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
        lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
        lowerCamelCase__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """patrickvonplaten/t5-tiny-random"""
        lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
        lowerCamelCase__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
        lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] )
        lowerCamelCase__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" )
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """sshleifer/tiny-gpt2"""
        lowerCamelCase__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
        lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
        lowerCamelCase__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,)
            lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """sshleifer/tiny-gpt2"""

        def _check_summary_is_not_empty(_lowerCAmelCase ):
            self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) )
            self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) )
            self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) )
            self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
            lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
            lowerCamelCase__ = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
9
1
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTVaConfig,
    MobileViTVaForImageClassification,
    MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
UpperCamelCase : Any = logging.get_logger(__name__)


def A__ ( __lowerCAmelCase : Optional[Any] ):
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(__lowerCAmelCase : Any , __lowerCAmelCase : str="" , __lowerCAmelCase : Dict="." ):
        lowerCamelCase__ = []
        for k, v in d.items():
            lowerCamelCase__ = parent_key + sep + k if parent_key else k
            if isinstance(__lowerCAmelCase , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(__lowerCAmelCase , __lowerCAmelCase , sep=__lowerCAmelCase ).items() )
            else:
                items.append((new_key, v) )
        return dict(__lowerCAmelCase )

    lowerCamelCase__ = argparse.Namespace()
    with open(__lowerCAmelCase , """r""" ) as yaml_file:
        try:
            lowerCamelCase__ = yaml.load(__lowerCAmelCase , Loader=yaml.FullLoader )

            lowerCamelCase__ = flatten_yaml_as_dict(__lowerCAmelCase )
            for k, v in flat_cfg.items():
                setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(__lowerCAmelCase , str(__lowerCAmelCase ) ) )
    return config


def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ):
    lowerCamelCase__ = MobileViTVaConfig()

    lowerCamelCase__ = False

    # dataset
    if task_name.startswith("""imagenet1k_""" ):
        lowerCamelCase__ = 1000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            lowerCamelCase__ = 384
        else:
            lowerCamelCase__ = 256
        lowerCamelCase__ = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        lowerCamelCase__ = 2_1000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            lowerCamelCase__ = 384
        else:
            lowerCamelCase__ = 256
        lowerCamelCase__ = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        lowerCamelCase__ = 151
        lowerCamelCase__ = 512
        lowerCamelCase__ = """ade20k-id2label.json"""
        lowerCamelCase__ = True
    elif task_name.startswith("""voc_""" ):
        lowerCamelCase__ = 21
        lowerCamelCase__ = 512
        lowerCamelCase__ = """pascal-voc-id2label.json"""
        lowerCamelCase__ = True

    # orig_config
    lowerCamelCase__ = load_orig_config_file(__lowerCAmelCase )
    assert getattr(__lowerCAmelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    lowerCamelCase__ = getattr(__lowerCAmelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(__lowerCAmelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    lowerCamelCase__ = getattr(__lowerCAmelCase , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        lowerCamelCase__ = getattr(__lowerCAmelCase , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            lowerCamelCase__ = getattr(__lowerCAmelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            lowerCamelCase__ = getattr(__lowerCAmelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
            lowerCamelCase__ = getattr(__lowerCAmelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )

    # id2label
    lowerCamelCase__ = """huggingface/label-files"""
    lowerCamelCase__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
    lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
    lowerCamelCase__ = idalabel
    lowerCamelCase__ = {v: k for k, v in idalabel.items()}

    return config


def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ):
    lowerCamelCase__ = dct.pop(__lowerCAmelCase )
    lowerCamelCase__ = val


def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple=False ):
    if base_model:
        lowerCamelCase__ = """"""
    else:
        lowerCamelCase__ = """mobilevitv2."""

    lowerCamelCase__ = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            lowerCamelCase__ = k[8:]
        else:
            lowerCamelCase__ = k

        if ".block." in k:
            lowerCamelCase__ = k_new.replace(""".block.""" , """.""" )
        if ".conv." in k:
            lowerCamelCase__ = k_new.replace(""".conv.""" , """.convolution.""" )
        if ".norm." in k:
            lowerCamelCase__ = k_new.replace(""".norm.""" , """.normalization.""" )

        if "conv_1." in k:
            lowerCamelCase__ = k_new.replace("""conv_1.""" , F'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if F'''layer_{i}.''' in k:
                lowerCamelCase__ = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            lowerCamelCase__ = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
        if ".red_1x1." in k:
            lowerCamelCase__ = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )

        for i in [3, 4, 5]:
            if F'''layer_{i}.0.''' in k:
                lowerCamelCase__ = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if F'''layer_{i}.1.local_rep.0.''' in k:
                lowerCamelCase__ = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if F'''layer_{i}.1.local_rep.1.''' in k:
                lowerCamelCase__ = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )

        for i in [3, 4, 5]:
            if i == 3:
                lowerCamelCase__ = [0, 1]
            elif i == 4:
                lowerCamelCase__ = [0, 1, 2, 3]
            elif i == 5:
                lowerCamelCase__ = [0, 1, 2]

            for j in j_in:
                if F'''layer_{i}.1.global_rep.{j}.''' in k:
                    lowerCamelCase__ = k_new.replace(
                        F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
            if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
                lowerCamelCase__ = k_new.replace(
                    F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )

            if F'''layer_{i}.1.conv_proj.''' in k:
                lowerCamelCase__ = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )

        if "pre_norm_attn.0." in k:
            lowerCamelCase__ = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            lowerCamelCase__ = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
        if "pre_norm_ffn.0." in k:
            lowerCamelCase__ = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            lowerCamelCase__ = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            lowerCamelCase__ = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )

        if "classifier.1." in k:
            lowerCamelCase__ = k_new.replace("""classifier.1.""" , """classifier.""" )

        if "seg_head." in k:
            lowerCamelCase__ = k_new.replace("""seg_head.""" , """segmentation_head.""" )
        if ".aspp_layer." in k:
            lowerCamelCase__ = k_new.replace(""".aspp_layer.""" , """.""" )
        if ".aspp_pool." in k:
            lowerCamelCase__ = k_new.replace(""".aspp_pool.""" , """.""" )

        rename_keys.append((k, k_new) )
    return rename_keys


def A__ ( __lowerCAmelCase : Dict ):
    lowerCamelCase__ = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(__lowerCAmelCase )
    for k in keys_to_ignore:
        state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )


def A__ ( ):
    lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
    return im


@torch.no_grad()
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : str ):
    lowerCamelCase__ = get_mobilevitva_config(__lowerCAmelCase , __lowerCAmelCase )

    # load original state_dict
    lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )

    # load huggingface model
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        lowerCamelCase__ = MobileViTVaForSemanticSegmentation(__lowerCAmelCase ).eval()
        lowerCamelCase__ = False
    else:
        lowerCamelCase__ = MobileViTVaForImageClassification(__lowerCAmelCase ).eval()
        lowerCamelCase__ = False

    # remove and rename some keys of load the original model
    lowerCamelCase__ = checkpoint
    remove_unused_keys(__lowerCAmelCase )
    lowerCamelCase__ = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    # load modified state_dict
    model.load_state_dict(__lowerCAmelCase )

    # Check outputs on an image, prepared by MobileViTImageProcessor
    lowerCamelCase__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    lowerCamelCase__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
    lowerCamelCase__ = model(**__lowerCAmelCase )

    # verify classification model
    if task_name.startswith("""imagenet""" ):
        lowerCamelCase__ = outputs.logits
        lowerCamelCase__ = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            lowerCamelCase__ = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] )
            assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1e-4 )

    Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
    print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__lowerCAmelCase )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(__lowerCAmelCase )


if __name__ == "__main__":
    UpperCamelCase : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--task',
        default='imagenet1k_256',
        type=str,
        help=(
            'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
            '\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
        ),
        choices=[
            'imagenet1k_256',
            'imagenet1k_384',
            'imagenet21k_to_1k_256',
            'imagenet21k_to_1k_384',
            'ade20k_deeplabv3',
            'voc_deeplabv3',
        ],
    )
    parser.add_argument(
        '--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
    )
    parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )

    UpperCamelCase : Dict = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
9
'''simple docstring'''
from math import factorial


UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def A__ ( __lowerCAmelCase : int ):
    if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        raise TypeError("""Parameter number must be int""" )

    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""" )

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) )


def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ):
    if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        raise TypeError("""Parameters chain_length and number_limit must be int""" )

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""" )

    # the counter for the chains with the exact desired length
    lowerCamelCase__ = 0
    # the cached sizes of the previous chains
    lowerCamelCase__ = {}

    for start_chain_element in range(1 , __lowerCAmelCase ):
        # The temporary set will contain the elements of the chain
        lowerCamelCase__ = set()
        lowerCamelCase__ = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        lowerCamelCase__ = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(__lowerCAmelCase )
            chain_set_length += 1
            lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase )

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        lowerCamelCase__ = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{solution()}')
9
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


UpperCamelCase : str = logging.get_logger(__name__)


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
        warnings.warn(
            """The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PerceiverImageProcessor instead.""" ,_lowerCAmelCase ,)
        super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase )
9
'''simple docstring'''
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCamelCase : Optional[Any] = 'src/diffusers'

# Matches is_xxx_available()
UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')

UpperCamelCase : Optional[int] = '\n{0} = None\n'

UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'

UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'


def A__ ( __lowerCAmelCase : Union[str, Any] ):
    lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase )
    if len(__lowerCAmelCase ) == 0:
        return None
    return "_and_".join(__lowerCAmelCase )


def A__ ( ):
    with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lowerCamelCase__ = f.readlines()

    # Get to the point we do the actual imports for type checking
    lowerCamelCase__ = 0
    lowerCamelCase__ = {}
    # Go through the end of the file
    while line_index < len(__lowerCAmelCase ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        lowerCamelCase__ = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("""else:""" ):
                line_index += 1
            line_index += 1

            lowerCamelCase__ = []
            # Until we unindent, add backend objects to the list
            while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1:
                lowerCamelCase__ = lines[line_index]
                lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1

            if len(__lowerCAmelCase ) > 0:
                lowerCamelCase__ = objects
        else:
            line_index += 1

    return backend_specific_objects


def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ):
    if name.isupper():
        return DUMMY_CONSTANT.format(__lowerCAmelCase )
    elif name.islower():
        return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase )
    else:
        return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase )


def A__ ( __lowerCAmelCase : Optional[int]=None ):
    if backend_specific_objects is None:
        lowerCamelCase__ = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    lowerCamelCase__ = {}

    for backend, objects in backend_specific_objects.items():
        lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
        lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] )
        lowerCamelCase__ = dummy_file

    return dummy_files


def A__ ( __lowerCAmelCase : List[str]=False ):
    lowerCamelCase__ = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    lowerCamelCase__ = {"""torch""": """pt"""}

    # Locate actual dummy modules and read their content.
    lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" )
    lowerCamelCase__ = {
        backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' )
        for backend in dummy_files.keys()
    }

    lowerCamelCase__ = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(__lowerCAmelCase ):
            with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                lowerCamelCase__ = f.read()
        else:
            lowerCamelCase__ = """"""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main '''
                    """__init__ has new objects.""" )
                with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    """The main __init__ has objects that are not present in """
                    F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` '''
                    """to fix this.""" )


if __name__ == "__main__":
    UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    UpperCamelCase : Any = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
9
1
'''simple docstring'''
def A__ ( __lowerCAmelCase : list[list[float]] ):
    lowerCamelCase__ = []
    for data in source_data:
        for i, el in enumerate(__lowerCAmelCase ):
            if len(__lowerCAmelCase ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(__lowerCAmelCase ) )
    return data_lists


def A__ ( __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ):
    lowerCamelCase__ = []
    for dlist, weight in zip(__lowerCAmelCase , __lowerCAmelCase ):
        lowerCamelCase__ = min(__lowerCAmelCase )
        lowerCamelCase__ = max(__lowerCAmelCase )
        lowerCamelCase__ = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            lowerCamelCase__ = F'''Invalid weight of {weight:f} provided'''
            raise ValueError(__lowerCAmelCase )
        score_lists.append(__lowerCAmelCase )
    return score_lists


def A__ ( __lowerCAmelCase : list[list[float]] ):
    lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(__lowerCAmelCase ):
            lowerCamelCase__ = final_scores[j] + ele
    return final_scores


def A__ ( __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ):
    lowerCamelCase__ = get_data(__lowerCAmelCase )
    lowerCamelCase__ = calculate_each_score(__lowerCAmelCase , __lowerCAmelCase )
    lowerCamelCase__ = generate_final_scores(__lowerCAmelCase )
    # append scores to source data
    for i, ele in enumerate(__lowerCAmelCase ):
        source_data[i].append(__lowerCAmelCase )
    return source_data
9
'''simple docstring'''
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')


@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (a ,unittest.TestCase ):
    '''simple docstring'''
    _UpperCamelCase = GPTSwaTokenizer
    _UpperCamelCase = False
    _UpperCamelCase = True
    _UpperCamelCase = False

    def UpperCamelCase_ ( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        lowerCamelCase__ = """This is a test"""
        lowerCamelCase__ = """This is a test"""
        return input_text, output_text

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """<s>"""
        lowerCamelCase__ = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] ,"""<unk>""" )
        self.assertEqual(vocab_keys[1] ,"""<s>""" )
        self.assertEqual(vocab_keys[-1] ,"""j""" )
        self.assertEqual(len(_lowerCAmelCase ) ,20_00 )

    def UpperCamelCase_ ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size ,20_00 )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase )

        lowerCamelCase__ = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )

        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] )

        lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        # fmt: off
        self.assertListEqual(
            _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,)
        # fmt: on
        lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
        self.assertListEqual(
            _lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,)

        lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
        # fmt: off
        self.assertListEqual(
            _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
        # fmt: on

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase )
        lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""]
        lowerCamelCase__ = [
            [4_65, 2_87, 2_65, 6_31, 8_42],
            [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
            self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )

        # Test that decode_fast returns the input text
        for text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
            self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )

    @slow
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = [
            """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
            """Hey there, how are you doing this fine day?""",
            """This is a text with a trailing spaces followed by a dot .""",
            """Häj sväjs lillebrör! =)""",
            """Det är inget fel på Mr. Cool""",
        ]
        # fmt: off
        lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
9
1