Dataset schema:

column                    type     values / lengths
code                      string   lengths 82 to 54.1k
code_codestyle            int64    0 to 699
style_context             string   lengths 111 to 35.6k
style_context_codestyle   int64    0 to 699
label                     int64    0 to 1

Rows below follow the column order: code, code_codestyle, style_context, style_context_codestyle, label.
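For orientation, a minimal sketch of how a dataset with this schema could be inspected with the Hugging Face datasets library. The repository path below is a hypothetical placeholder, not taken from this preview:

import datasets  # the Hugging Face `datasets` library

# Hypothetical repository path; substitute the actual dataset name.
ds = datasets.load_dataset("user/code-style-pairs", split="train")

row = ds[0]
# The two int columns are style-class ids; `label` is binary.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code string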
import torch

from diffusers import DiffusionPipeline


class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    def __init__( self, _a, _a ) -> Optional[Any]:
        super().__init__()
        self.register_modules(unet=_a, scheduler=_a )

    def __call__( self ) -> int:
        __SCREAMING_SNAKE_CASE = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        __SCREAMING_SNAKE_CASE = 1
        __SCREAMING_SNAKE_CASE = self.unet(_a, _a ).sample
        __SCREAMING_SNAKE_CASE = self.scheduler.step(_a, _a, _a ).prev_sample
        __SCREAMING_SNAKE_CASE = scheduler_output - scheduler_output + torch.ones_like(_a )
        return result
693
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY

if is_vision_available():
    from PIL import Image
else:
    class __SCREAMING_SNAKE_CASE :
        @staticmethod
        def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]:
            pass

@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    @require_torch
    def __lowerCAmelCase ( self ) -> Tuple:
        __SCREAMING_SNAKE_CASE = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(_a ),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )
        __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
        self.assertEqual(
            nested_simplify(_a ),
            [
                [
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                ],
                [
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                ],
                [
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                ],
                [
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                ],
                [
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                ],
            ],
        )

    @require_tf
    def __lowerCAmelCase ( self ) -> Any:
        __SCREAMING_SNAKE_CASE = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
        self.assertEqual(
            nested_simplify(_a ),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )
        __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
        self.assertEqual(
            nested_simplify(_a ),
            [
                [
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                ],
                [
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                ],
                [
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                ],
                [
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                ],
                [
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                    {"score": 0.333, "label": ANY(_a )},
                ],
            ],
        )

    @slow
    @require_torch
    def __lowerCAmelCase ( self ) -> Tuple:
        __SCREAMING_SNAKE_CASE = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
        self.assertEqual(
            nested_simplify(_a ),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
        self.assertEqual(
            nested_simplify(_a ),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def __lowerCAmelCase ( self ) -> List[str]:
        __SCREAMING_SNAKE_CASE = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
        self.assertEqual(
            nested_simplify(_a ),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
        self.assertEqual(
            nested_simplify(_a ),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
693
1
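The style_context cell above exercises the Transformers zero-shot image classification pipeline. As a reference point, a minimal standalone sketch of the same API, reusing the model name and fixture path from the sample; everything else is standard pipeline usage:

from PIL import Image
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# Each result is a dict with "score" and "label"; the scores sum to 1 over the candidates.
print(classifier(image, candidate_labels=["cat", "plane", "remote"]))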
from typing import List, Optional, Union

import torch

from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)

_snake_case : List[str] = logging.get_logger(__name__)  # pylint: disable=invalid-name

_snake_case : Optional[int] = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'

def _A ( __snake_case :List[str] , __snake_case :Any , __snake_case :Dict=8 ) -> Union[str, Any]:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    __SCREAMING_SNAKE_CASE = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor

class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    def __init__( self, _a, _a, _a, ) -> List[Any]:
        super().__init__()
        self.register_modules(
            unet=_a,
            scheduler=_a,
            movq=_a,
        )
        __SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a ) -> str:
        if latents is None:
            __SCREAMING_SNAKE_CASE = randn_tensor(_a, generator=_a, device=_a, dtype=_a )
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            __SCREAMING_SNAKE_CASE = latents.to(_a )
        __SCREAMING_SNAKE_CASE = latents * scheduler.init_noise_sigma
        return latents

    def __lowerCAmelCase ( self, _a=0 ) -> Dict:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        __SCREAMING_SNAKE_CASE = torch.device(f'''cuda:{gpu_id}''' )
        __SCREAMING_SNAKE_CASE = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(_a, _a )

    def __lowerCAmelCase ( self, _a=0 ) -> Optional[Any]:
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        __SCREAMING_SNAKE_CASE = torch.device(f'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=_a )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        __SCREAMING_SNAKE_CASE = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cpu_offload_with_hook(_a, _a, prev_module_hook=_a )
        # We'll offload the last model manually.
        __SCREAMING_SNAKE_CASE = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __lowerCAmelCase ( self ) -> int:
        if not hasattr(self.unet, "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(_a, "_hf_hook" )
                and hasattr(module._hf_hook, "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(_a )
    def __call__( self, _a, _a, _a = 5_12, _a = 5_12, _a = 1_00, _a = 4.0, _a = 1, _a = None, _a = None, _a = "pil", _a = True, ) -> Optional[Any]:
        __SCREAMING_SNAKE_CASE = self._execution_device
        __SCREAMING_SNAKE_CASE = guidance_scale > 1.0
        if isinstance(_a, _a ):
            __SCREAMING_SNAKE_CASE = torch.cat(_a, dim=0 )
        __SCREAMING_SNAKE_CASE = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(_a, _a ):
            __SCREAMING_SNAKE_CASE = torch.cat(_a, dim=0 )
        if do_classifier_free_guidance:
            __SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(_a, dim=0 )
            __SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(_a, dim=0 )
            __SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=_a )
        self.scheduler.set_timesteps(_a, device=_a )
        __SCREAMING_SNAKE_CASE = self.scheduler.timesteps
        __SCREAMING_SNAKE_CASE = self.unet.config.in_channels
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = downscale_height_and_width(_a, _a, self.movq_scale_factor )
        # create initial latent
        __SCREAMING_SNAKE_CASE = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            _a,
            _a,
            _a,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(_a ) ):
            # expand the latents if we are doing classifier free guidance
            __SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __SCREAMING_SNAKE_CASE = {"image_embeds": image_embeds}
            __SCREAMING_SNAKE_CASE = self.unet(
                sample=_a,
                timestep=_a,
                encoder_hidden_states=_a,
                added_cond_kwargs=_a,
                return_dict=_a,
            )[0]
            if do_classifier_free_guidance:
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1], dim=1 )
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
                __SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                __SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text], dim=1 )
            if not (
                hasattr(self.scheduler.config, "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1], dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            __SCREAMING_SNAKE_CASE = self.scheduler.step(
                _a,
                _a,
                _a,
                generator=_a,
            )[0]
        # post-processing
        __SCREAMING_SNAKE_CASE = self.movq.decode(_a, force_not_quantize=_a )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            __SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
            __SCREAMING_SNAKE_CASE = image.clamp(0, 1 )
            __SCREAMING_SNAKE_CASE = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
        if output_type == "pil":
            __SCREAMING_SNAKE_CASE = self.numpy_to_pil(_a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_a )
693
from __future__ import annotations

import math

def _A ( __snake_case :int , __snake_case :int , __snake_case :bool , __snake_case :list[int] , __snake_case :float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(__snake_case ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) ,
            minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) ,
        )
    return min(
        minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) ,
        minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) ,
    )

def _A ( ) -> None:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    __SCREAMING_SNAKE_CASE = math.log(len(__snake_case ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
693
1
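The minimax sample above has all identifiers obfuscated. A readable restatement of the same recursion, with descriptive names of my own choosing, would look like this:

from __future__ import annotations

import math

def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    # Leaf level: return the score stored at this node.
    if depth == height:
        return scores[node_index]
    # Internal node: recurse into both children, alternating the player.
    left = minimax(depth + 1, node_index * 2, not is_max, scores, height)
    right = minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = math.log(len(scores), 2)  # depth of the complete binary tree: 3
print(minimax(0, 0, True, scores, height))  # prints 65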
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

_snake_case : Dict = logging.getLogger(__name__)

_snake_case : Any = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_snake_case : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)

@dataclass
class __SCREAMING_SNAKE_CASE :
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE ,
        metadata={
            """help""": (
                """The model checkpoint for weights initialization. Leave None if you want to train a model from"""
                """ scratch."""
            )
        } ,
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE ,
        metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__SCREAMING_SNAKE_CASE )} ,
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE ,
        metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,
    )

@dataclass
class __SCREAMING_SNAKE_CASE :
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The input training data file (a text file)."""}
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE ,
        metadata={
            """help""": (
                """The input training data files (multiple files in glob format). """
                """Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
            )
        } ,
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE ,
        metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} ,
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE ,
        metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} ,
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE ,
        metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} ,
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE ,
        metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} ,
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""}
    )
    SCREAMING_SNAKE_CASE__ =field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether ot not to use whole word mask."""} )
    SCREAMING_SNAKE_CASE__ =field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""}
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=1 / 6 ,
        metadata={
            """help""": (
                """Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
                """ modeling."""
            )
        } ,
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""}
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=-1 ,
        metadata={
            """help""": (
                """Optional input sequence length after tokenization."""
                """The training dataset will be truncated in block of this size for training."""
                """Default to the model max input length for single sentence inputs (take into account special tokens)."""
            )
        } ,
    )
    SCREAMING_SNAKE_CASE__ =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached training and evaluation sets"""}
    )

def _A ( __snake_case :DataTrainingArguments , __snake_case :PreTrainedTokenizer , __snake_case :bool = False , __snake_case :Optional[str] = None , ) -> Union[str, Any]:
    """simple docstring"""
    def _dataset(__snake_case :Optional[Any] , __snake_case :Tuple=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
                return LineByLineWithRefDataset(
                    tokenizer=__snake_case ,
                    file_path=__snake_case ,
                    block_size=args.block_size ,
                    ref_path=__snake_case ,
                )
            return LineByLineTextDataset(tokenizer=__snake_case , file_path=__snake_case , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=__snake_case ,
                file_path=__snake_case ,
                block_size=args.block_size ,
                overwrite_cache=args.overwrite_cache ,
                cache_dir=__snake_case ,
            )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(__snake_case ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )

def _A ( ) -> Union[str, Any]:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument." )
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            " --overwrite_output_dir to overcome." )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,
        datefmt="%m/%d/%Y %H:%M:%S" ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.local_rank != -1 ) ,
        training_args.fpaa ,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , __snake_case )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        __SCREAMING_SNAKE_CASE = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch." )

    if model_args.tokenizer_name:
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name" )

    if model_args.model_name_or_path:
        __SCREAMING_SNAKE_CASE = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path ,
            from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,
            config=__snake_case ,
            cache_dir=model_args.cache_dir ,
        )
    else:
        logger.info("Training new model from scratch" )
        __SCREAMING_SNAKE_CASE = AutoModelWithLMHead.from_config(__snake_case )

    model.resize_token_embeddings(len(__snake_case ) )

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)." )

    if data_args.block_size <= 0:
        __SCREAMING_SNAKE_CASE = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        __SCREAMING_SNAKE_CASE = min(data_args.block_size , tokenizer.max_len )

    # Get datasets
    __SCREAMING_SNAKE_CASE = (
        get_dataset(__snake_case , tokenizer=__snake_case , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    __SCREAMING_SNAKE_CASE = (
        get_dataset(__snake_case , tokenizer=__snake_case , evaluate=__snake_case , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        __SCREAMING_SNAKE_CASE = DataCollatorForPermutationLanguageModeling(
            tokenizer=__snake_case ,
            plm_probability=data_args.plm_probability ,
            max_span_length=data_args.max_span_length ,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            __SCREAMING_SNAKE_CASE = DataCollatorForWholeWordMask(
                tokenizer=__snake_case , mlm_probability=data_args.mlm_probability )
        else:
            __SCREAMING_SNAKE_CASE = DataCollatorForLanguageModeling(
                tokenizer=__snake_case , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )

    # Initialize our Trainer
    __SCREAMING_SNAKE_CASE = Trainer(
        model=__snake_case ,
        args=__snake_case ,
        data_collator=__snake_case ,
        train_dataset=__snake_case ,
        eval_dataset=__snake_case ,
        prediction_loss_only=__snake_case ,
    )

    # Training
    if training_args.do_train:
        __SCREAMING_SNAKE_CASE = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=__snake_case )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    __SCREAMING_SNAKE_CASE = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        __SCREAMING_SNAKE_CASE = trainer.evaluate()
        __SCREAMING_SNAKE_CASE = math.exp(eval_output["eval_loss"] )
        __SCREAMING_SNAKE_CASE = {"perplexity": perplexity}
        __SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
        if trainer.is_world_master():
            with open(__snake_case , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key in sorted(result.keys() ):
                    logger.info("  %s = %s" , __snake_case , str(result[key] ) )
                    writer.write("%s = %s\n" % (key, str(result[key] )) )
        results.update(__snake_case )

    return results

def _A ( __snake_case :Any ) -> Optional[int]:
    """simple docstring"""
    main()

if __name__ == "__main__":
    main()
693
def _A ( __snake_case :bytes ) -> str:
    """simple docstring"""
    return "".join([hex(__snake_case )[2:].zfill(2 ).upper() for byte in list(__snake_case )] )

def _A ( __snake_case :str ) -> bytes:
    """simple docstring"""
    if (len(__snake_case ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(__snake_case ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(__snake_case ) , 2 ) )

if __name__ == "__main__":
    import doctest

    doctest.testmod()
693
1
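The base16 helpers in the style_context above mirror what Python's standard library already provides; a quick round-trip with base64.b16encode and b16decode shows the expected behavior:

import base64

data = b"Hello"
encoded = base64.b16encode(data).decode("ascii")  # uppercase hex, per RFC 3548 section 6
print(encoded)                    # 48656C6C6F
print(base64.b16decode(encoded))  # b'Hello'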
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_snake_case : Any = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : Any = ['XGLMTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : List[Any] = ['XGLMTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : Union[str, Any] = [
        'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XGLMForCausalLM',
        'XGLMModel',
        'XGLMPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : Union[str, Any] = [
        'FlaxXGLMForCausalLM',
        'FlaxXGLMModel',
        'FlaxXGLMPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : List[Any] = [
        'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXGLMForCausalLM',
        'TFXGLMModel',
        'TFXGLMPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    _snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
693
from functools import lru_cache

def _A ( __snake_case :int ) -> set:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = 2
    __SCREAMING_SNAKE_CASE = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(__snake_case )
    if n > 1:
        factors.add(__snake_case )
    return factors

@lru_cache
def _A ( __snake_case :int ) -> int:
    """simple docstring"""
    return len(unique_prime_factors(__snake_case ) )

def _A ( __snake_case :list ) -> bool:
    """simple docstring"""
    return len(set(__snake_case ) ) in (0, 1)

def _A ( __snake_case :int ) -> list:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = 2
    while True:
        # Increment each value of a generated range
        __SCREAMING_SNAKE_CASE = [base + i for i in range(__snake_case )]
        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        __SCREAMING_SNAKE_CASE = [upf_len(__snake_case ) for x in group]
        checker.append(__snake_case )
        # If all numbers in the list are equal, return the group variable.
        if equality(__snake_case ):
            return group
        # Increment our base variable by 1
        base += 1

def _A ( __snake_case :int = 4 ) -> int:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = run(__snake_case )
    return results[0] if len(__snake_case ) else None

if __name__ == "__main__":
    print(solution())
693
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

_snake_case : str = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : Dict = ['GPTSw3Tokenizer']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer

else:
    import sys

    _snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
693
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)

def _A ( __snake_case :Dict ) -> Optional[Any]:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = VideoMAEConfig()
    set_architecture_configs(__snake_case , __snake_case )
    if "finetuned" not in model_name:
        __SCREAMING_SNAKE_CASE = False
    if "finetuned" in model_name:
        __SCREAMING_SNAKE_CASE = "huggingface/label-files"
        if "kinetics" in model_name:
            __SCREAMING_SNAKE_CASE = 400
            __SCREAMING_SNAKE_CASE = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            __SCREAMING_SNAKE_CASE = 174
            __SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
        __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
        __SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()}
        __SCREAMING_SNAKE_CASE = idalabel
        __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
    return config

def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]:
    """simple docstring"""
    if "small" in model_name:
        __SCREAMING_SNAKE_CASE = 384
        __SCREAMING_SNAKE_CASE = 1536
        __SCREAMING_SNAKE_CASE = 12
        __SCREAMING_SNAKE_CASE = 16
        __SCREAMING_SNAKE_CASE = 12
        __SCREAMING_SNAKE_CASE = 3
        __SCREAMING_SNAKE_CASE = 192
        __SCREAMING_SNAKE_CASE = 768
    elif "large" in model_name:
        __SCREAMING_SNAKE_CASE = 1024
        __SCREAMING_SNAKE_CASE = 4096
        __SCREAMING_SNAKE_CASE = 24
        __SCREAMING_SNAKE_CASE = 16
        __SCREAMING_SNAKE_CASE = 12
        __SCREAMING_SNAKE_CASE = 8
        __SCREAMING_SNAKE_CASE = 512
        __SCREAMING_SNAKE_CASE = 2048
    elif "huge" in model_name:
        __SCREAMING_SNAKE_CASE = 1280
        __SCREAMING_SNAKE_CASE = 5120
        __SCREAMING_SNAKE_CASE = 32
        __SCREAMING_SNAKE_CASE = 16
        __SCREAMING_SNAKE_CASE = 12
        __SCREAMING_SNAKE_CASE = 8
        __SCREAMING_SNAKE_CASE = 640
        __SCREAMING_SNAKE_CASE = 2560
    elif "base" not in model_name:
        raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )

def _A ( __snake_case :List[Any] ) -> Optional[int]:
    """simple docstring"""
    if "encoder." in name:
        __SCREAMING_SNAKE_CASE = name.replace("encoder." , "" )
    if "cls_token" in name:
        __SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" )
    if "decoder_pos_embed" in name:
        __SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        __SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        __SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        __SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
    if "decoder.blocks" in name:
        __SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        __SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" )
    if "attn.proj" in name:
        __SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name and "bias" not in name:
        __SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" )
    if "attn" in name:
        __SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" )
    if "norm1" in name:
        __SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        __SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        __SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        __SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        __SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        __SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        __SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        __SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        __SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" )
    if "head" in name and "decoder" not in name:
        __SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" )
    return name

def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        __SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case )
        if key.startswith("encoder." ):
            __SCREAMING_SNAKE_CASE = key.replace("encoder." , "" )
        if "qkv" in key:
            __SCREAMING_SNAKE_CASE = key.split("." )
            if key.startswith("decoder.blocks" ):
                __SCREAMING_SNAKE_CASE = config.decoder_hidden_size
                __SCREAMING_SNAKE_CASE = int(key_split[2] )
                __SCREAMING_SNAKE_CASE = "decoder.decoder_layers."
                if "weight" in key:
                    __SCREAMING_SNAKE_CASE = val[:dim, :]
                    __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
                    __SCREAMING_SNAKE_CASE = val[-dim:, :]
            else:
                __SCREAMING_SNAKE_CASE = config.hidden_size
                __SCREAMING_SNAKE_CASE = int(key_split[1] )
                __SCREAMING_SNAKE_CASE = "videomae.encoder.layer."
                if "weight" in key:
                    __SCREAMING_SNAKE_CASE = val[:dim, :]
                    __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
                    __SCREAMING_SNAKE_CASE = val[-dim:, :]
        else:
            __SCREAMING_SNAKE_CASE = val
    return orig_state_dict

def _A ( ) -> Dict:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
    __SCREAMING_SNAKE_CASE = np.load(__snake_case )
    return list(__snake_case )

def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case )
    if "finetuned" in model_name:
        __SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case )
    else:
        __SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case )

    # download original checkpoint, hosted on Google Drive
    __SCREAMING_SNAKE_CASE = "pytorch_model.bin"
    gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
    __SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" )
    if "model" in files:
        __SCREAMING_SNAKE_CASE = files["model"]
    else:
        __SCREAMING_SNAKE_CASE = files["module"]
    __SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case )
    model.load_state_dict(__snake_case )
    model.eval()

    # verify model on basic input
    __SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    __SCREAMING_SNAKE_CASE = prepare_video()
    __SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" )
    if "finetuned" not in model_name:
        __SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
        __SCREAMING_SNAKE_CASE = torch.load(__snake_case )

    __SCREAMING_SNAKE_CASE = model(**__snake_case )
    __SCREAMING_SNAKE_CASE = outputs.logits

    __SCREAMING_SNAKE_CASE = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
        __SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
    elif model_name == "videomae-small-finetuned-ssv2":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
        __SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
    elif model_name == "videomae-base":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
        __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
    elif model_name == "videomae-base-short":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
        __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        __SCREAMING_SNAKE_CASE = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
    elif model_name == "videomae-large":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
        __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
        __SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
        __SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
        __SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
    elif model_name == "videomae-base-finetuned-kinetics":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
        __SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
    elif model_name == "videomae-base-short-ssv2":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
        __SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
        __SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
    elif model_name == "videomae-base-ssv2":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
        __SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        __SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
        __SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
    else:
        print("Logits:" , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
    print("Logits ok!" )

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        __SCREAMING_SNAKE_CASE = outputs.loss
        assert torch.allclose(__snake_case , __snake_case , atol=1e-4 )
        print("Loss ok!" )

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(__snake_case )
        model.save_pretrained(__snake_case )

    if push_to_hub:
        print("Pushing to the hub..." )
        model.push_to_hub(__snake_case , organization="nielsr" )

if __name__ == "__main__":
    _snake_case : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
        type=str,
        help=(
            'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
            ' download link.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/Users/nielsrogge/Documents/VideoMAE/Test',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    _snake_case : Optional[int] = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
693
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_snake_case : Optional[int] = {
    'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : Optional[Any] = [
        'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'NezhaForNextSentencePrediction',
        'NezhaForMaskedLM',
        'NezhaForPreTraining',
        'NezhaForMultipleChoice',
        'NezhaForQuestionAnswering',
        'NezhaForSequenceClassification',
        'NezhaForTokenClassification',
        'NezhaModel',
        'NezhaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    _snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
693
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor

_snake_case : str = logging.get_logger(__name__)

class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    def __init__( self, *_a, **_a ) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            _a,
        )
        super().__init__(*_a, **_a )
693
1
def _A ( __snake_case :int , __snake_case :int ) -> int:
    """simple docstring"""
    return int((input_a, input_a).count(0 ) != 0 )

def _A ( ) -> None:
    """simple docstring"""
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0

if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
693
from math import sqrt

def _A ( __snake_case :int ) -> int:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = 0
    for i in range(1 , int(sqrt(__snake_case ) + 1 ) ):
        if n % i == 0 and i != sqrt(__snake_case ):
            total += i + n // i
        elif i == sqrt(__snake_case ):
            total += i
    return total - n

def _A ( __snake_case :int = 1_0000 ) -> int:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = sum(
        i
        for i in range(1 , __snake_case )
        if sum_of_divisors(sum_of_divisors(__snake_case ) ) == i and sum_of_divisors(__snake_case ) != i
    )
    return total

if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
693
1
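The style_context above (the amicable-numbers problem) hinges on a proper-divisor sum. A brute-force restatement with my own naming, checked against the classic amicable pair 220 and 284:

def sum_of_proper_divisors(n: int) -> int:
    # Sum of all divisors of n strictly below n itself.
    return sum(i for i in range(1, n) if n % i == 0)

# 220 and 284 form the classic amicable pair:
print(sum_of_proper_divisors(220))  # 284
print(sum_of_proper_divisors(284))  # 220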
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate

@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    SCREAMING_SNAKE_CASE__ =field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    SCREAMING_SNAKE_CASE__ =Features({"""audio""": Audio()} )
    SCREAMING_SNAKE_CASE__ =Features({"""labels""": ClassLabel} )
    SCREAMING_SNAKE_CASE__ ="audio"
    SCREAMING_SNAKE_CASE__ ="labels"

    def __lowerCAmelCase ( self, _a ) -> str:
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column], _a ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        __SCREAMING_SNAKE_CASE = copy.deepcopy(self )
        __SCREAMING_SNAKE_CASE = self.label_schema.copy()
        __SCREAMING_SNAKE_CASE = features[self.label_column]
        __SCREAMING_SNAKE_CASE = label_schema
        return task_template

    @property
    def __lowerCAmelCase ( self ) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
693
def _A ( __snake_case :int , __snake_case :float , __snake_case :float ) -> float:
    """simple docstring"""
    return round(float(moles / volume ) * nfactor )

def _A ( __snake_case :float , __snake_case :float , __snake_case :float ) -> float:
    """simple docstring"""
    return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )

def _A ( __snake_case :float , __snake_case :float , __snake_case :float ) -> float:
    """simple docstring"""
    return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )

def _A ( __snake_case :float , __snake_case :float , __snake_case :float ) -> float:
    """simple docstring"""
    return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )

if __name__ == "__main__":
    import doctest

    doctest.testmod()
693
1
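The gas-law helpers in the style_context above all rearrange the ideal gas law PV = nRT with R taken as 0.0821 L·atm/(mol·K). A readable restatement of the pressure form, with my own names:

R = 0.0821  # ideal gas constant in L*atm/(mol*K), as used in the sample

def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    # P = nRT / V
    return (moles * R * kelvin) / volume

# 2 mol of gas at 300 K in 24.63 L is at about 2 atm:
print(pressure_of_gas_system(2, 300, 24.63))  # ~2.0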
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version

def _A ( __snake_case :str , __snake_case :str , __snake_case :Optional[str] = None ) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
        # old versions of hfh don't url-encode the file path
        __SCREAMING_SNAKE_CASE = quote(__snake_case )
    return hfh.hf_hub_url(__snake_case , __snake_case , repo_type="dataset" , revision=__snake_case )
693
import tempfile
import unittest

from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel

class __SCREAMING_SNAKE_CASE :
    def __init__( self, _a, _a=99, _a=13, _a=7, _a=9, _a=True, _a=True, _a=False, _a=32, _a=5, _a=4, _a=37, _a=8, _a=0.1, _a=0.002, _a=1, _a=0, _a=0, _a=None, _a=None, ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE = parent
        __SCREAMING_SNAKE_CASE = batch_size
        __SCREAMING_SNAKE_CASE = encoder_seq_length
        __SCREAMING_SNAKE_CASE = decoder_seq_length
        # For common tests
        __SCREAMING_SNAKE_CASE = self.decoder_seq_length
        __SCREAMING_SNAKE_CASE = is_training
        __SCREAMING_SNAKE_CASE = use_attention_mask
        __SCREAMING_SNAKE_CASE = use_labels
        __SCREAMING_SNAKE_CASE = vocab_size
        __SCREAMING_SNAKE_CASE = hidden_size
        __SCREAMING_SNAKE_CASE = num_hidden_layers
        __SCREAMING_SNAKE_CASE = num_attention_heads
        __SCREAMING_SNAKE_CASE = d_ff
        __SCREAMING_SNAKE_CASE = relative_attention_num_buckets
        __SCREAMING_SNAKE_CASE = dropout_rate
        __SCREAMING_SNAKE_CASE = initializer_factor
        __SCREAMING_SNAKE_CASE = eos_token_id
        __SCREAMING_SNAKE_CASE = pad_token_id
        __SCREAMING_SNAKE_CASE = decoder_start_token_id
        __SCREAMING_SNAKE_CASE = None
        __SCREAMING_SNAKE_CASE = decoder_layers

    def __lowerCAmelCase ( self ) -> Optional[int]:
        return TaConfig.from_pretrained("google/umt5-base" )

    def __lowerCAmelCase ( self, _a, _a, _a, _a=None, _a=None, _a=None, _a=None, _a=None, ) -> int:
        if attention_mask is None:
            __SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            __SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            __SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=_a )
        if decoder_head_mask is None:
            __SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=_a )
        if cross_attn_head_mask is None:
            __SCREAMING_SNAKE_CASE = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=_a )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def __lowerCAmelCase ( self ) -> Tuple:
        __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size )
        __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        __SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 )
        __SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 )
        __SCREAMING_SNAKE_CASE = self.get_config()
        __SCREAMING_SNAKE_CASE = config.num_attention_heads
        __SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(_a, _a, _a )
        return config, input_dict

    def __lowerCAmelCase ( self ) -> List[str]:
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        return config, inputs_dict

    def __lowerCAmelCase ( self ) -> Optional[int]:
        return TaConfig(
            vocab_size=1_66,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        return TaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Union[str, Any]:
        __SCREAMING_SNAKE_CASE = UMTaModel(config=_a )
        model.to(_a )
        model.eval()
        __SCREAMING_SNAKE_CASE = model(
            input_ids=_a,
            decoder_input_ids=_a,
            attention_mask=_a,
            decoder_attention_mask=_a,
        )
        __SCREAMING_SNAKE_CASE = model(input_ids=_a, decoder_input_ids=_a )
        __SCREAMING_SNAKE_CASE = result.last_hidden_state
        __SCREAMING_SNAKE_CASE = result.past_key_values
        __SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(_a ), config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ), 4 )

    def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Tuple:
        __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
        # first forward pass
        __SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
        __SCREAMING_SNAKE_CASE = model(_a )
        __SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
        self.parent.assertTrue(len(_a ) == len(_a ) )
        self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1), config.vocab_size )
        # append to next input_ids and
        __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 )
        __SCREAMING_SNAKE_CASE = model(_a )["last_hidden_state"]
        __SCREAMING_SNAKE_CASE = model(_a, past_key_values=_a )["last_hidden_state"]
        # select random slice
        __SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item()
        __SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
        __SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) )

    def __lowerCAmelCase ( self, _a, _a, ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).to(_a ).half().eval()
        __SCREAMING_SNAKE_CASE = model(**_a )["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(_a ).any().item() )

@require_torch
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    SCREAMING_SNAKE_CASE__ =(
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    SCREAMING_SNAKE_CASE__ =(UMTaForConditionalGeneration,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ =(
        {
            """conversational""": UMTaForConditionalGeneration,
            """feature-extraction""": UMTaModel,
            """summarization""": UMTaForConditionalGeneration,
            """text2text-generation""": UMTaForConditionalGeneration,
            """translation""": UMTaForConditionalGeneration,
            """question-answering""": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ =True
    SCREAMING_SNAKE_CASE__ =False
    SCREAMING_SNAKE_CASE__ =False
    SCREAMING_SNAKE_CASE__ =True
    SCREAMING_SNAKE_CASE__ =True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    SCREAMING_SNAKE_CASE__ =[0.8, 0.9]

    def __lowerCAmelCase ( self ) -> str:
        __SCREAMING_SNAKE_CASE = UMTaModelTester(self )

    @unittest.skip("Test has a segmentation fault on torch 1.8.0" )
    def __lowerCAmelCase ( self ) -> Dict:
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        __SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                _a,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f'''{tmpdirname}/t5_test.onnx''',
                export_params=_a,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
    def __lowerCAmelCase ( self ) -> str:
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*_a )

    def __lowerCAmelCase ( self ) -> Tuple:
        __SCREAMING_SNAKE_CASE = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        __SCREAMING_SNAKE_CASE = config_and_inputs[0]
        __SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(_a ).eval()
        model.to(_a )
        __SCREAMING_SNAKE_CASE = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=_a ),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
        }
        for attn_name, (name, mask) in zip(_a, head_masking.items() ):
            __SCREAMING_SNAKE_CASE = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                __SCREAMING_SNAKE_CASE = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=_a )
            __SCREAMING_SNAKE_CASE = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=_a,
                return_dict_in_generate=_a,
                **_a,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            __SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 )

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
    def __lowerCAmelCase ( self ) -> int:
        pass

@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
    def __lowerCAmelCase ( self ) -> List[Any]:
        __SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a )
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a )
        __SCREAMING_SNAKE_CASE = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        __SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids
        # fmt: off
        __SCREAMING_SNAKE_CASE = torch.tensor(
            [
                [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(_a, _a )
        __SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) )
        __SCREAMING_SNAKE_CASE = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a )
        self.assertEqual(_a, _a )
693
1
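The decoder-cache check in the test above can be distilled into a reusable helper. A minimal sketch follows; the model is assumed to be a Hugging Face decoder returning `last_hidden_state` and `past_key_values`, and `check_cache_equivalence` is a name introduced here:

import torch

def check_cache_equivalence(model, input_ids, next_tokens, atol=1e-3):
    # Full forward pass over the concatenated sequence.
    full_ids = torch.cat([input_ids, next_tokens], dim=-1)
    out_no_past = model(full_ids).last_hidden_state
    # Incremental pass: encode the prefix once, then feed only the new tokens.
    past = model(input_ids, use_cache=True).past_key_values
    out_with_past = model(next_tokens, past_key_values=past).last_hidden_state
    # The outputs at the new positions must match up to numerical tolerance.
    return torch.allclose(out_no_past[:, -1], out_with_past[:, -1], atol=atol)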
def combination_util(arr: list[int], n: int, r: int, index: int, data: list[int], i: int) -> None:
    """Recursively fill data[] and print every combination of size r."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr: list[int], n: int, r: int) -> None:
    # Print all combinations using temporary array 'data[]'
    data = [0] * r
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
693
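For comparison, the standard library enumerates the same combinations in the same order; a short sketch:

from itertools import combinations

arr = [10, 20, 30, 40, 50]
for combo in combinations(arr, 3):
    print(*combo)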
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def _A ( __snake_case :BertModel , __snake_case :str , __snake_case :str ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") __SCREAMING_SNAKE_CASE = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__snake_case ): os.makedirs(__snake_case ) __SCREAMING_SNAKE_CASE = model.state_dict() def to_tf_var_name(__snake_case :str ): for patt, repl in iter(__snake_case ): __SCREAMING_SNAKE_CASE = name.replace(__snake_case , __snake_case ) return f'''bert/{name}''' def create_tf_var(__snake_case :np.ndarray , __snake_case :str , __snake_case :tf.Session ): __SCREAMING_SNAKE_CASE = tf.dtypes.as_dtype(tensor.dtype ) __SCREAMING_SNAKE_CASE = tf.get_variable(dtype=__snake_case , shape=tensor.shape , name=__snake_case , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__snake_case ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: __SCREAMING_SNAKE_CASE = to_tf_var_name(__snake_case ) __SCREAMING_SNAKE_CASE = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): __SCREAMING_SNAKE_CASE = torch_tensor.T __SCREAMING_SNAKE_CASE = create_tf_var(tensor=__snake_case , name=__snake_case , session=__snake_case ) tf.keras.backend.set_value(__snake_case , __snake_case ) __SCREAMING_SNAKE_CASE = session.run(__snake_case ) print(f'''Successfully created {tf_name}: {np.allclose(__snake_case , __snake_case )}''' ) __SCREAMING_SNAKE_CASE = tf.train.Saver(tf.trainable_variables() ) saver.save(__snake_case , os.path.join(__snake_case , model_name.replace("-" , "_" ) + ".ckpt" ) ) def _A ( __snake_case :str=None ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("--model_name" , type=__snake_case , required=__snake_case , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=__snake_case , default=__snake_case , required=__snake_case , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=__snake_case , required=__snake_case , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=__snake_case , required=__snake_case , help="Directory in which to save tensorflow model" ) __SCREAMING_SNAKE_CASE = parser.parse_args(__snake_case ) __SCREAMING_SNAKE_CASE = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__snake_case , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
693
1
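The converter above transposes dense and attention weights before export. A minimal sketch of why (shapes are illustrative): PyTorch `nn.Linear` stores weights as `(out_features, in_features)`, while TensorFlow dense kernels are `(in_features, out_features)`.

import numpy as np

torch_weight = np.zeros((768, 3072), dtype=np.float32)  # PyTorch layout: (out, in)
tf_kernel = torch_weight.T                               # TF layout: (in, out)
assert tf_kernel.shape == (3072, 768)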
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self, _a, _a=13, _a=3, _a=2_24, _a=30, _a=4_00, _a=True, _a=None, _a=True, _a=[0.5, 0.5, 0.5], _a=[0.5, 0.5, 0.5], ) -> str: __SCREAMING_SNAKE_CASE = size if size is not None else {"height": 18, "width": 18} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std def __lowerCAmelCase ( self ) -> Optional[int]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE__ =ViTImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = EfficientFormerImageProcessorTester(self ) @property def __lowerCAmelCase ( self ) -> Optional[Any]: return self.image_proc_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a, "image_mean" ) ) self.assertTrue(hasattr(_a, "image_std" ) ) self.assertTrue(hasattr(_a, "do_normalize" ) ) self.assertTrue(hasattr(_a, "do_resize" ) ) self.assertTrue(hasattr(_a, "size" ) ) def __lowerCAmelCase ( self ) -> str: pass def __lowerCAmelCase ( self ) -> Any: # Initialize image_processor __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_proc_tester, equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a, Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processor(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ), ) # Test batched __SCREAMING_SNAKE_CASE = image_processor(_a, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ), ) def __lowerCAmelCase ( self ) -> Optional[int]: # Initialize image_processor __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_proc_tester, equal_resolution=_a, numpify=_a ) for image in image_inputs: self.assertIsInstance(_a, np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processor(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, 
self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ), ) # Test batched __SCREAMING_SNAKE_CASE = image_processor(_a, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ), ) def __lowerCAmelCase ( self ) -> List[Any]: # Initialize image_processor __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_proc_tester, equal_resolution=_a, torchify=_a ) for image in image_inputs: self.assertIsInstance(_a, torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processor(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ), ) # Test batched __SCREAMING_SNAKE_CASE = image_processor(_a, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ), )
693
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _snake_case : str = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =["""input_values""", """padding_mask"""] def __init__( self, _a = 1, _a = 2_40_00, _a = 0.0, _a = None, _a = None, **_a, ) -> str: super().__init__(feature_size=_a, sampling_rate=_a, padding_value=_a, **_a ) __SCREAMING_SNAKE_CASE = chunk_length_s __SCREAMING_SNAKE_CASE = overlap @property def __lowerCAmelCase ( self ) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __lowerCAmelCase ( self ) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self, _a, _a = None, _a = False, _a = None, _a = None, _a = None, ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one." ) elif padding is None: # by default let's pad the inputs __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = bool( isinstance(_a, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(_a, dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_a, np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(_a, dtype=np.floataa ) elif isinstance(_a, np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_audio.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(_a ).T] # verify inputs are valid for idx, example in enumerate(_a ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = BatchFeature({"input_values": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: __SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: __SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length __SCREAMING_SNAKE_CASE = 
"max_length" else: __SCREAMING_SNAKE_CASE = input_values # normal padding on batch if padded_inputs is None: __SCREAMING_SNAKE_CASE = self.pad( _a, max_length=_a, truncation=_a, padding=_a, return_attention_mask=_a, ) if padding: __SCREAMING_SNAKE_CASE = padded_inputs.pop("attention_mask" ) __SCREAMING_SNAKE_CASE = [] for example in padded_inputs.pop("input_values" ): if self.feature_size == 1: __SCREAMING_SNAKE_CASE = example[..., None] input_values.append(example.T ) __SCREAMING_SNAKE_CASE = input_values if return_tensors is not None: __SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(_a ) return padded_inputs
693
1
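The chunking arithmetic used by the extractor above, worked through with assumed values (1-second chunks at 24 kHz with 50% overlap):

chunk_length_s, sampling_rate, overlap = 1.0, 24_000, 0.5
chunk_length = int(chunk_length_s * sampling_rate)          # 24_000 samples per window
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # windows advance by 12_000
n_samples = 60_000
nb_step = int(-(-n_samples // chunk_stride))                # ceil(60_000 / 12_000) = 5
padded_length = (nb_step - 1) * chunk_stride + chunk_length # 72_000 after padding
print(chunk_length, chunk_stride, padded_length)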
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
693
from typing import Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
693
1
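A hypothetical usage sketch for the pipeline above (the model id and output file name are illustrative, and running this would download a checkpoint):

# from diffusers import ScoreSdeVePipeline
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
# image = pipe(num_inference_steps=2000).images[0]
# image.save("sde_ve_sample.png")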
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
693
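A quick usage sketch of is_isogram:

assert is_isogram("Uncopyrightable") is True   # no letter repeats
assert is_isogram("allowance") is False        # 'a' and 'l' repeat
# is_isogram("abc1") would raise ValueError (non-alphabetic input)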
def solution(n: int = 400_0000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
693
1
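Because even Fibonacci terms are exactly every third term, they satisfy E(k) = 4*E(k-1) + E(k-2), so the odd terms can be skipped entirely. A variant exploiting this (solution_even_only is a name introduced here):

def solution_even_only(n: int = 400_0000) -> int:
    # Even Fibonacci numbers: 2, 8, 34, 144, ... each is 4*previous + the one before.
    total = 0
    a, b = 2, 8
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total

assert solution_even_only() == 4613732  # matches the straightforward version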
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _snake_case : Any = get_tests_dir('fixtures') class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: # A mock response for an HTTP head request to emulate server down __SCREAMING_SNAKE_CASE = mock.Mock() __SCREAMING_SNAKE_CASE = 5_00 __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = HTTPError __SCREAMING_SNAKE_CASE = {} # Download this model to make sure it's in the cache. __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=_a ) as mock_head: __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # This check we did call the fake head request mock_head.assert_called() def __lowerCAmelCase ( self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @classmethod def __lowerCAmelCase ( cls ) -> Dict: __SCREAMING_SNAKE_CASE = TOKEN HfFolder.save_token(_a ) @classmethod def __lowerCAmelCase ( cls ) -> Dict: try: delete_repo(token=cls._token, repo_id="test-feature-extractor" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor" ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_a ) feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token ) __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_a, getattr(_a, _a ) ) # Reset repo delete_repo(token=self._token, repo_id="test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( _a, repo_id="test-feature-extractor", push_to_hub=_a, use_auth_token=self._token ) __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_a, getattr(_a, _a ) ) def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_a ) feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token ) __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_a, getattr(_a, _a ) ) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor" ) # Push to hub via 
save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( _a, repo_id="valid_org/test-feature-extractor-org", push_to_hub=_a, use_auth_token=self._token ) __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_a, getattr(_a, _a ) ) def __lowerCAmelCase ( self ) -> Any: CustomFeatureExtractor.register_for_auto_class() __SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(_a ) feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"}, ) __SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained( f'''{USER}/test-dynamic-feature-extractor''', trust_remote_code=_a ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor" )
693
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """For each element, scan the rest of the list for the first greater value."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but iterates with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack version: scan right to left, keeping a stack of candidates."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
693
1
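A tiny trace of the stack-based version above: each element is pushed and popped at most once, so the right-to-left scan is O(n) amortized.

print(next_greatest_element([4, 5, 2, 25]))  # [5, 25, 25, -1]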
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
693
from typing import Any class __SCREAMING_SNAKE_CASE : def __init__( self, _a ) -> Any: __SCREAMING_SNAKE_CASE = data __SCREAMING_SNAKE_CASE = None def __repr__( self ) -> str: return f'''Node({self.data})''' class __SCREAMING_SNAKE_CASE : def __init__( self ) -> Tuple: __SCREAMING_SNAKE_CASE = None def __iter__( self ) -> Any: __SCREAMING_SNAKE_CASE = self.head while node: yield node.data __SCREAMING_SNAKE_CASE = node.next def __len__( self ) -> int: return sum(1 for _ in self ) def __repr__( self ) -> str: return "->".join([str(_a ) for item in self] ) def __getitem__( self, _a ) -> Any: if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self, _a, _a ) -> None: if not 0 <= index < len(self ): raise ValueError("list index out of range." ) __SCREAMING_SNAKE_CASE = self.head for _ in range(_a ): __SCREAMING_SNAKE_CASE = current.next __SCREAMING_SNAKE_CASE = data def __lowerCAmelCase ( self, _a ) -> None: self.insert_nth(len(self ), _a ) def __lowerCAmelCase ( self, _a ) -> None: self.insert_nth(0, _a ) def __lowerCAmelCase ( self, _a, _a ) -> None: if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) __SCREAMING_SNAKE_CASE = Node(_a ) if self.head is None: __SCREAMING_SNAKE_CASE = new_node elif index == 0: __SCREAMING_SNAKE_CASE = self.head # link new_node to head __SCREAMING_SNAKE_CASE = new_node else: __SCREAMING_SNAKE_CASE = self.head for _ in range(index - 1 ): __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = new_node def __lowerCAmelCase ( self ) -> None: # print every node data print(self ) def __lowerCAmelCase ( self ) -> Any: return self.delete_nth(0 ) def __lowerCAmelCase ( self ) -> Any: # delete from tail return self.delete_nth(len(self ) - 1 ) def __lowerCAmelCase ( self, _a = 0 ) -> Any: if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." ) __SCREAMING_SNAKE_CASE = self.head # default first node if index == 0: __SCREAMING_SNAKE_CASE = self.head.next else: __SCREAMING_SNAKE_CASE = self.head for _ in range(index - 1 ): __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = temp.next.next return delete_node.data def __lowerCAmelCase ( self ) -> bool: return self.head is None def __lowerCAmelCase ( self ) -> None: __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = self.head while current: # Store the current node's next node. __SCREAMING_SNAKE_CASE = current.next # Make the current node's next point backwards __SCREAMING_SNAKE_CASE = prev # Make the previous node be the current node __SCREAMING_SNAKE_CASE = current # Make the current node the next node (to progress iteration) __SCREAMING_SNAKE_CASE = next_node # Return prev in order to put the head at the end __SCREAMING_SNAKE_CASE = prev def _A ( ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = LinkedList() assert linked_list.is_empty() is True assert str(__snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(__snake_case ) == i linked_list.insert_nth(__snake_case , i + 1 ) assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__snake_case ) == 9 assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __SCREAMING_SNAKE_CASE = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) ) def _A ( ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = [ -9, 100, Node(7734_5112 ), "dlrow olleH", 7, 5555, 0, -1_9_2.5_5_5_5_5, "Hello, world!", 7_7.9, Node(10 ), None, None, 1_2.2_0, ] __SCREAMING_SNAKE_CASE = LinkedList() for i in test_input: linked_list.insert_tail(__snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __SCREAMING_SNAKE_CASE = linked_list.delete_head() assert result == -9 assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __SCREAMING_SNAKE_CASE = linked_list.delete_tail() assert result == 1_2.2 assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __SCREAMING_SNAKE_CASE = linked_list.delete_nth(10 ) assert result is None assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!" 
) ) assert ( str(__snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__snake_case ) assert ( str(__snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _A ( ) -> Union[str, Any]: """simple docstring""" from doctest import testmod testmod() __SCREAMING_SNAKE_CASE = LinkedList() linked_list.insert_head(input("Inserting 1st at head " ).strip() ) linked_list.insert_head(input("Inserting 2nd at head " ).strip() ) print("\nPrint list:" ) linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() ) linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() ) print("\nPrint list:" ) linked_list.print_list() print("\nDelete head" ) linked_list.delete_head() print("Delete tail" ) linked_list.delete_tail() print("\nPrint list:" ) linked_list.print_list() print("\nReverse linked list" ) linked_list.reverse() print("\nPrint list:" ) linked_list.print_list() print("\nString representation of linked list:" ) print(__snake_case ) print("\nReading/changing Node data using indexing:" ) print(f'''Element at Position 1: {linked_list[1]}''' ) __SCREAMING_SNAKE_CASE = input("Enter New Value: " ).strip() print("New list:" ) print(__snake_case ) print(f'''length of linked_list is : {len(__snake_case )}''' ) if __name__ == "__main__": main()
693
1
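The list's __len__ above walks every node on each call (O(n)). A common O(1) variant caches the size; a sketch, assuming the de-obfuscated method names insert_nth/delete_nth, through which the list's other insert and delete operations funnel:

class SizedLinkedList(LinkedList):
    def __init__(self):
        super().__init__()
        self._size = 0

    def insert_nth(self, index, data):
        super().insert_nth(index, data)
        self._size += 1

    def delete_nth(self, index=0):
        value = super().delete_nth(index)
        self._size -= 1
        return value

    def __len__(self):
        # O(1): insert_head/insert_tail and delete_head/delete_tail
        # delegate to insert_nth/delete_nth, so the counter stays accurate.
        return self._size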
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _snake_case : List[Any] = { 'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'], 'tokenization_roformer': ['RoFormerTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Dict = ['RoFormerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Union[str, Any] = [ 'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoFormerForCausalLM', 'RoFormerForMaskedLM', 'RoFormerForMultipleChoice', 'RoFormerForQuestionAnswering', 'RoFormerForSequenceClassification', 'RoFormerForTokenClassification', 'RoFormerLayer', 'RoFormerModel', 'RoFormerPreTrainedModel', 'load_tf_weights_in_roformer', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Optional[Any] = [ 'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRoFormerForCausalLM', 'TFRoFormerForMaskedLM', 'TFRoFormerForMultipleChoice', 'TFRoFormerForQuestionAnswering', 'TFRoFormerForSequenceClassification', 'TFRoFormerForTokenClassification', 'TFRoFormerLayer', 'TFRoFormerModel', 'TFRoFormerPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : str = [ 'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxRoFormerForMaskedLM', 'FlaxRoFormerForMultipleChoice', 'FlaxRoFormerForQuestionAnswering', 'FlaxRoFormerForSequenceClassification', 'FlaxRoFormerForTokenClassification', 'FlaxRoFormerModel', 'FlaxRoFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, 
FlaxRoFormerPreTrainedModel, ) else: import sys _snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
693
import argparse
import json

from tqdm import tqdm


def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
693
1
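The shape of one record consumed by the script above (illustrative values, not real data):

dpr_record = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [{"title": "Thomas Jefferson"}, {"title": "Declaration of Independence"}],
}
# evaluation_set line : "who wrote the declaration of independence"
# gold_data_path line : "Thomas Jefferson\tDeclaration of Independence"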
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
693
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of the qualifying triangles not exceeding max_perimeter."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
693
1
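A brute-force cross-check of the recurrence above via Heron's formula (a sketch; the problem restricts attention to "almost equilateral" triangles with sides (a, a, a±1) and integral area):

from math import isqrt

def brute_force(max_perimeter: int) -> int:
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for c in (a - 1, a + 1):
            perimeter = 2 * a + c
            if perimeter > max_perimeter:
                continue
            # Heron: 16 * area^2 = p * (p - 2a)^2 * (p - 2c) for sides (a, a, c)
            sq = perimeter * (perimeter - 2 * a) ** 2 * (perimeter - 2 * c)
            root = isqrt(sq)
            if root > 0 and root * root == sq and root % 4 == 0:
                total += perimeter
    return total

assert brute_force(1_000) == solution(1_000) == 984  # perimeters 16 + 50 + 196 + 722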
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
693
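The one-liner above is a quine: the string is formatted with its own repr, so the program prints its own source. Unrolled for clarity:

s = 'print((lambda quine: quine %% quine)(%r))'
print(s % s)  # prints the original one-liner, character for character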
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _snake_case , _snake_case , _snake_case : List[Any] = False, False, False @dataclass class __SCREAMING_SNAKE_CASE : SCREAMING_SNAKE_CASE__ =None SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =None # Automatically constructed SCREAMING_SNAKE_CASE__ ="dict" SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __call__( self ) -> Optional[int]: return self.pa_type def __lowerCAmelCase ( self, _a ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(_a, _a ): return {"bytes": None, "path": value} elif isinstance(_a, _a ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes __SCREAMING_SNAKE_CASE = BytesIO() sf.write(_a, value["array"], value["sampling_rate"], format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) __SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: __SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67 __SCREAMING_SNAKE_CASE = BytesIO(bytes() ) sf.write(_a, _a, value["sampling_rate"], format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def __lowerCAmelCase ( self, _a, _a = None ) -> dict: if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." 
) from err __SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: __SCREAMING_SNAKE_CASE = token_per_repo_id or {} __SCREAMING_SNAKE_CASE = path.split("::" )[-1] try: __SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"] __SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id] except (ValueError, KeyError): __SCREAMING_SNAKE_CASE = None with xopen(_a, "rb", use_auth_token=_a ) as f: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a ) __SCREAMING_SNAKE_CASE = array.T if self.mono: __SCREAMING_SNAKE_CASE = librosa.to_mono(_a ) if self.sampling_rate and self.sampling_rate != sampling_rate: __SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate ) __SCREAMING_SNAKE_CASE = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." ) return { "bytes": Value("binary" ), "path": Value("string" ), } def __lowerCAmelCase ( self, _a ) -> pa.StructArray: if pa.types.is_string(storage.type ): __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): __SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: __SCREAMING_SNAKE_CASE = storage.field("bytes" ) else: __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: __SCREAMING_SNAKE_CASE = storage.field("path" ) else: __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() ) return array_cast(_a, self.pa_type ) def __lowerCAmelCase ( self, _a ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(_a ): with xopen(_a, "rb" ) as f: __SCREAMING_SNAKE_CASE = f.read() return bytes_ __SCREAMING_SNAKE_CASE = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) __SCREAMING_SNAKE_CASE = pa.array( [os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], 
["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(_a, self.pa_type )
693
1
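The PCM branch in the Audio feature above rescales signed 16-bit samples into [-1, 1] floats before re-encoding to WAV; with synthetic samples:

import numpy as np

pcm = np.array([-32767, 0, 16384, 32767], dtype=np.int16)
floats = pcm.astype(np.float32) / 32767
print(floats)  # [-1.0, 0.0, ~0.5, 1.0]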
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case : Any = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def _A ( __snake_case :List[Any] , __snake_case :List[Any] , __snake_case :Optional[Any]=8 ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __SCREAMING_SNAKE_CASE = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def _A ( __snake_case :Dict , __snake_case :List[Any]=512 , __snake_case :str=512 ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) __SCREAMING_SNAKE_CASE = np.array(pil_image.convert("RGB" ) ) __SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_2_7.5 - 1 __SCREAMING_SNAKE_CASE = np.transpose(__snake_case , [2, 0, 1] ) __SCREAMING_SNAKE_CASE = torch.from_numpy(__snake_case ).unsqueeze(0 ) return image class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): def __init__( self, _a, _a, _a, ) -> Optional[Any]: super().__init__() self.register_modules( unet=_a, scheduler=_a, movq=_a, ) __SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __lowerCAmelCase ( self, _a, _a, _a ) -> Optional[Any]: # get the original timestep using init_timestep __SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ), _a ) __SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep, 0 ) __SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, _a=None ) -> List[Any]: if not isinstance(_a, (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_a )}''' ) __SCREAMING_SNAKE_CASE = image.to(device=_a, dtype=_a ) __SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt if image.shape[1] == 4: __SCREAMING_SNAKE_CASE = image else: if isinstance(_a, _a ) and len(_a ) != 
batch_size: raise ValueError( f'''You have passed a list of generators of length {len(_a )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) elif isinstance(_a, _a ): __SCREAMING_SNAKE_CASE = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_a ) ] __SCREAMING_SNAKE_CASE = torch.cat(_a, dim=0 ) else: __SCREAMING_SNAKE_CASE = self.movq.encode(_a ).latent_dist.sample(_a ) __SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents __SCREAMING_SNAKE_CASE = torch.cat([init_latents], dim=0 ) __SCREAMING_SNAKE_CASE = init_latents.shape __SCREAMING_SNAKE_CASE = randn_tensor(_a, generator=_a, device=_a, dtype=_a ) # get latents __SCREAMING_SNAKE_CASE = self.scheduler.add_noise(_a, _a, _a ) __SCREAMING_SNAKE_CASE = init_latents return latents def __lowerCAmelCase ( self, _a=0 ) -> Tuple: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) __SCREAMING_SNAKE_CASE = torch.device(f'''cuda:{gpu_id}''' ) __SCREAMING_SNAKE_CASE = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a, _a ) def __lowerCAmelCase ( self, _a=0 ) -> Union[str, Any]: if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) __SCREAMING_SNAKE_CASE = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=_a ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __SCREAMING_SNAKE_CASE = None for cpu_offloaded_model in [self.unet, self.movq]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cpu_offload_with_hook(_a, _a, prev_module_hook=_a ) # We'll offload the last model manually. __SCREAMING_SNAKE_CASE = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __lowerCAmelCase ( self ) -> List[Any]: if not hasattr(self.unet, "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(_a, "_hf_hook" ) and hasattr(module._hf_hook, "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_a ) def __call__( self, _a, _a, _a, _a = 5_12, _a = 5_12, _a = 1_00, _a = 4.0, _a = 0.3, _a = 1, _a = None, _a = "pil", _a = True, ) -> Dict: __SCREAMING_SNAKE_CASE = self._execution_device __SCREAMING_SNAKE_CASE = guidance_scale > 1.0 if isinstance(_a, _a ): __SCREAMING_SNAKE_CASE = torch.cat(_a, dim=0 ) __SCREAMING_SNAKE_CASE = image_embeds.shape[0] if isinstance(_a, _a ): __SCREAMING_SNAKE_CASE = torch.cat(_a, dim=0 ) if do_classifier_free_guidance: __SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(_a, dim=0 ) __SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(_a, dim=0 ) __SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=_a ) if not isinstance(_a, _a ): __SCREAMING_SNAKE_CASE = [image] if not all(isinstance(_a, (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f'''Input is in incorrect format: {[type(_a ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor''' ) __SCREAMING_SNAKE_CASE = torch.cat([prepare_image(_a, _a, _a ) for i in image], dim=0 ) __SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype, device=_a ) __SCREAMING_SNAKE_CASE = self.movq.encode(_a )["latents"] __SCREAMING_SNAKE_CASE = latents.repeat_interleave(_a, dim=0 ) self.scheduler.set_timesteps(_a, device=_a ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_timesteps(_a, _a, _a ) __SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = downscale_height_and_width(_a, _a, self.movq_scale_factor ) __SCREAMING_SNAKE_CASE = self.prepare_latents( _a, _a, _a, _a, image_embeds.dtype, _a, _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance __SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __SCREAMING_SNAKE_CASE = {"image_embeds": image_embeds} __SCREAMING_SNAKE_CASE = self.unet( sample=_a, timestep=_a, encoder_hidden_states=_a, added_cond_kwargs=_a, return_dict=_a, )[0] if do_classifier_free_guidance: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1], dim=1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.chunk(2 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = variance_pred.chunk(2 ) __SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text], dim=1 ) if not ( hasattr(self.scheduler.config, "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1], dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __SCREAMING_SNAKE_CASE = self.scheduler.step( _a, _a, _a, generator=_a, )[0] # post-processing __SCREAMING_SNAKE_CASE = self.movq.decode(_a, force_not_quantize=_a )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: __SCREAMING_SNAKE_CASE = image * 0.5 + 0.5 __SCREAMING_SNAKE_CASE = image.clamp(0, 1 ) __SCREAMING_SNAKE_CASE = image.cpu().permute(0, 2, 3, 1 ).float().numpy() if output_type == "pil": __SCREAMING_SNAKE_CASE = self.numpy_to_pil(_a ) if not return_dict: return (image,) return ImagePipelineOutput(images=_a )
693
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =(IPNDMScheduler,) SCREAMING_SNAKE_CASE__ =(("""num_inference_steps""", 50),) def __lowerCAmelCase ( self, **_a ) -> str: __SCREAMING_SNAKE_CASE = {"num_train_timesteps": 10_00} config.update(**_a ) return config def __lowerCAmelCase ( self, _a=0, **_a ) -> List[Any]: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a ) __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] if time_step is None: __SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) __SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a ) new_scheduler.set_timesteps(_a ) # copy over dummy past residuals __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self ) -> str: pass def __lowerCAmelCase ( self, _a=0, **_a ) -> int: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals (must be after setting timesteps) __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] if time_step is None: __SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) __SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a ) # copy over dummy past residuals new_scheduler.set_timesteps(_a ) # copy over dummy past residual (must be after setting timesteps) __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self, **_a ) -> Tuple: __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] 
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a ) __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter scheduler.set_timesteps(_a ) for i, t in enumerate(scheduler.timesteps ): __SCREAMING_SNAKE_CASE = model(_a, _a ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample for i, t in enumerate(scheduler.timesteps ): __SCREAMING_SNAKE_CASE = model(_a, _a ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample return sample def __lowerCAmelCase ( self ) -> Optional[int]: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample if num_inference_steps is not None and hasattr(_a, "set_timesteps" ): scheduler.set_timesteps(_a ) elif num_inference_steps is not None and not hasattr(_a, "set_timesteps" ): __SCREAMING_SNAKE_CASE = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] __SCREAMING_SNAKE_CASE = scheduler.timesteps[5] __SCREAMING_SNAKE_CASE = scheduler.timesteps[6] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def __lowerCAmelCase ( self ) -> str: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_a, time_step=_a ) def __lowerCAmelCase ( self ) -> Optional[Any]: for t, num_inference_steps in zip([1, 5, 10], [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_a, time_step=_a ) def __lowerCAmelCase ( self ) -> Any: __SCREAMING_SNAKE_CASE = self.full_loop() __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 2_54_05_29 ) < 10
693
1
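A note on the pipeline sample above: its denoising loop combines the unconditional and text-conditional noise estimates via classifier-free guidance. A minimal sketch of that single step, assuming only two same-shaped dummy tensors (the shapes below are illustrative, not the real UNet's):

import torch

# Illustrative shapes; the real UNet output is (batch, channels, height, width).
noise_pred_uncond = torch.randn(1, 4, 64, 64)
noise_pred_text = torch.randn(1, 4, 64, 64)
guidance_scale = 4.0  # matches the pipeline's default above

# Push the estimate away from the unconditional prediction, toward the conditional one.
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == noise_pred_uncond.shape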
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Dict = logging.get_logger(__name__) _snake_case : Dict = { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json', 'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json', 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json', 'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json', 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json', 'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json', 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json', 'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ ="""funnel""" SCREAMING_SNAKE_CASE__ ={ """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", } def __init__( self, _a=3_05_22, _a=[4, 4, 4], _a=None, _a=2, _a=7_68, _a=12, _a=64, _a=30_72, _a="gelu_new", _a=0.1, _a=0.1, _a=0.0, _a=0.1, _a=None, _a=1E-9, _a="mean", _a="relative_shift", _a=True, _a=True, _a=True, **_a, ) -> Any: __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = block_sizes __SCREAMING_SNAKE_CASE = [1] * len(_a ) if block_repeats is None else block_repeats assert len(_a ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." __SCREAMING_SNAKE_CASE = num_decoder_layers __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = n_head __SCREAMING_SNAKE_CASE = d_head __SCREAMING_SNAKE_CASE = d_inner __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = initializer_std __SCREAMING_SNAKE_CASE = layer_norm_eps assert pooling_type in [ "mean", "max", ], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.''' __SCREAMING_SNAKE_CASE = pooling_type assert attention_type in [ "relative_shift", "factorized", ], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.''' __SCREAMING_SNAKE_CASE = attention_type __SCREAMING_SNAKE_CASE = separate_cls __SCREAMING_SNAKE_CASE = truncate_seq __SCREAMING_SNAKE_CASE = pool_q_only super().__init__(**_a ) @property def __lowerCAmelCase ( self ) -> int: return sum(self.block_sizes ) @num_hidden_layers.setter def __lowerCAmelCase ( self, _a ) -> List[str]: raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." 
) @property def __lowerCAmelCase ( self ) -> Tuple: return len(self.block_sizes ) @num_blocks.setter def __lowerCAmelCase ( self, _a ) -> List[Any]: raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
693
import random from .binary_exp_mod import bin_exp_mod def _A ( __snake_case :List[Any] , __snake_case :Union[str, Any]=1000 ) -> int: """simple docstring""" if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd __SCREAMING_SNAKE_CASE = n - 1 __SCREAMING_SNAKE_CASE = 0 while d % 2 == 0: d //= 2 exp += 1 # n - 1 = d * (2**exp) with d odd; integer division keeps d an int for the modular exponentiation __SCREAMING_SNAKE_CASE = 0 while count < prec: __SCREAMING_SNAKE_CASE = random.randint(2 , n - 1 ) __SCREAMING_SNAKE_CASE = bin_exp_mod(__snake_case , __snake_case , __snake_case ) if b != 1: __SCREAMING_SNAKE_CASE = True for _ in range(__snake_case ): if b == n - 1: __SCREAMING_SNAKE_CASE = False break __SCREAMING_SNAKE_CASE = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": _snake_case : int = abs(int(input('Enter bound : ').strip())) print('Here\'s the list of primes:') print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
693
1
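The primality sample above runs repeated Miller-Rabin rounds. A self-contained sketch of one round, using the built-in pow() in place of the repo's bin_exp_mod helper; the witness/liar values for 221 below are the standard textbook example:

def miller_rabin_round(n: int, a: int) -> bool:
    # Returns True when witness a is consistent with n being prime.
    d, exp = n - 1, 0
    while d % 2 == 0:  # write n - 1 = d * 2**exp with d odd
        d //= 2
        exp += 1
    b = pow(a, d, n)
    if b in (1, n - 1):
        return True
    for _ in range(exp - 1):
        b = b * b % n
        if b == n - 1:
            return True
    return False

assert miller_rabin_round(221, 174) is True   # 221 = 13 * 17, yet 174 is a strong liar
assert miller_rabin_round(221, 137) is False  # 137 exposes 221 as composite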
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class __SCREAMING_SNAKE_CASE : def __init__( self, _a, _a=13, _a=7, _a=True, _a=True, _a=True, _a=True, _a=99, _a=32, _a=5, _a=4, _a=4, _a="gelu", _a=0.0, _a=0.1, _a=True, _a=5_12, _a=16, _a=2, _a=0.02, _a=3, _a=4, _a=None, ) -> str: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_multiple_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = weight_tying __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def __lowerCAmelCase ( self ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCAmelCase ( self ) -> Any: return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_a, initializer_range=self.initializer_range, ) def __lowerCAmelCase ( self ) -> List[Any]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True return config, input_ids, input_mask, token_labels def __lowerCAmelCase ( self, _a, _a, _a ) -> List[Any]: __SCREAMING_SNAKE_CASE = GPTNeoXJapaneseModel(config=_a ) model.to(_a ) model.eval() __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a ) __SCREAMING_SNAKE_CASE = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self, _a, _a, _a ) -> int: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = GPTNeoXJapaneseModel(_a ) model.to(_a ) 
model.eval() __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self, _a, _a, _a, _a ) -> List[Any]: __SCREAMING_SNAKE_CASE = GPTNeoXJapaneseForCausalLM(config=_a ) model.to(_a ) model.eval() __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, labels=_a ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self, _a, _a, _a ) -> List[str]: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = GPTNeoXJapaneseForCausalLM(config=_a ) model.to(_a ) model.eval() # first forward pass __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, use_cache=_a ) __SCREAMING_SNAKE_CASE = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3), config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3), vocab_size=2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask], dim=-1 ) __SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, output_hidden_states=_a ) __SCREAMING_SNAKE_CASE = output_from_no_past["hidden_states"][0] __SCREAMING_SNAKE_CASE = model( _a, attention_mask=_a, past_key_values=_a, output_hidden_states=_a, )["hidden_states"][0] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE__ =(GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () SCREAMING_SNAKE_CASE__ =(GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ =( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =False def __lowerCAmelCase ( self ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = GPTNeoXJapaneseModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self, config_class=_a, hidden_size=37 ) def __lowerCAmelCase ( self ) -> Optional[int]: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> int: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_a, _a, _a ) def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() 
self.model_tester.create_and_check_model_as_decoder(_a, _a, _a ) def __lowerCAmelCase ( self ) -> str: # This regression test was failing with PyTorch < 1.3 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() __SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder(_a, _a, _a ) def __lowerCAmelCase ( self ) -> Optional[Any]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(_a, _a, _a ) def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*_a ) @slow def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = "abeja/gpt-neox-japanese-2.7b" __SCREAMING_SNAKE_CASE = ["ใƒ‡ใƒผใ‚ฟใ‚ตใ‚คใ‚จใƒณใƒ†ใ‚ฃใ‚นใƒˆใจใฏใ€", "100ๅนดๅพŒใซๅฟ…่ฆใจใ•ใ‚Œใ‚‹ไผš็คพใฏใ€", "ใƒ•ใƒซใƒชใƒขใƒผใƒˆใฎ็’ฐๅขƒใงๅƒใใŸใ‚ใซๅฟ…่ฆใชใ“ใจใฏใ€", "ๅ›ฝๅขƒใฎ้•ทใ„ใƒˆใƒณใƒใƒซใ‚’ๆŠœใ‘ใ‚‹ใจ", "็พŽๅ‘ณใ—ใ„ๆ—ฅๆœฌ้ฃŸใจใ„ใˆใฐใ€"] __SCREAMING_SNAKE_CASE = [ "ใƒ‡ใƒผใ‚ฟใ‚ตใ‚คใ‚จใƒณใƒ†ใ‚ฃใ‚นใƒˆใจใฏใ€ใƒ‡ใƒผใ‚ฟใ‚’ๅˆ†ๆžใ—ใ€ใƒ“ใ‚ธใƒใ‚นใซๅฝน็ซ‹ใค็Ÿฅ่ฆ‹ใ‚’ๅฐŽใๅ‡บใ™ๅฐ‚้–€ๅฎถใฎใ“ใจใงใ™ใ€‚", "100ๅนดๅพŒใซๅฟ…่ฆใจใ•ใ‚Œใ‚‹ไผš็คพใฏใ€ใ€Œไบบใ€ใŒไธญๅฟƒใฎไผš็คพใงใ™ใ€‚", "ใƒ•ใƒซใƒชใƒขใƒผใƒˆใฎ็’ฐๅขƒใงๅƒใใŸใ‚ใซๅฟ…่ฆใชใ“ใจใฏใ€ใ€Œ่‡ชๅˆ†ใฎๆ™‚้–“ใ‚’ใ‚ณใƒณใƒˆใƒญใƒผใƒซใ™ใ‚‹ใ€ใ“ใจใงใ™ใ€‚", "ๅ›ฝๅขƒใฎ้•ทใ„ใƒˆใƒณใƒใƒซใ‚’ๆŠœใ‘ใ‚‹ใจใ€ใใ“ใฏ้›ชๅ›ฝใ ใฃใŸใ€‚", "็พŽๅ‘ณใ—ใ„ๆ—ฅๆœฌ้ฃŸใจใ„ใˆใฐใ€ใ‚„ใฃใฑใ‚ŠใŠๅฏฟๅธใงใ™ใ‚ˆใญใ€‚", ] __SCREAMING_SNAKE_CASE = GPTNeoXJapaneseTokenizer.from_pretrained(_a ) __SCREAMING_SNAKE_CASE = GPTNeoXJapaneseForCausalLM.from_pretrained(_a ) __SCREAMING_SNAKE_CASE = [] for prompt in prompts: __SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt" ).input_ids __SCREAMING_SNAKE_CASE = model.generate(_a, max_length=50 ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a, skip_special_tokens=_a ) predicted_outputs += generated_string self.assertListEqual(_a, _a )
693
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def _A ( __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int ) -> np.ndarray: """simple docstring""" if (ksize % 2) == 0: __SCREAMING_SNAKE_CASE = ksize + 1 __SCREAMING_SNAKE_CASE = np.zeros((ksize, ksize) , dtype=np.floataa ) # fill in each kernel value for y in range(__snake_case ): for x in range(__snake_case ): # distance from center __SCREAMING_SNAKE_CASE = x - ksize // 2 __SCREAMING_SNAKE_CASE = y - ksize // 2 # convert degrees to radians __SCREAMING_SNAKE_CASE = theta / 180 * np.pi __SCREAMING_SNAKE_CASE = np.cos(_theta ) __SCREAMING_SNAKE_CASE = np.sin(_theta ) # get kernel x __SCREAMING_SNAKE_CASE = cos_theta * px + sin_theta * py # get kernel y __SCREAMING_SNAKE_CASE = -sin_theta * px + cos_theta * py # fill kernel __SCREAMING_SNAKE_CASE = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image _snake_case : Union[str, Any] = imread('../image_data/lena.jpg') # convert the image to grayscale _snake_case : List[str] = cvtColor(img, COLOR_BGR2GRAY) # apply multiple kernels to detect edges _snake_case : int = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 1_20, 1_50]: _snake_case : List[str] = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) _snake_case : Optional[Any] = out / out.max() * 2_55 _snake_case : Union[str, Any] = out.astype(np.uinta) imshow('Original', gray) imshow('Gabor filter with 20x20 mask and 6 directions', out) waitKey(0)
693
1
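The Gabor sample above builds the kernel by hand; OpenCV ships the same formulation. A cross-check sketch, with the caveats that cv2.getGaborKernel takes theta in radians (the hand-rolled version takes degrees) and does not apply the odd-ksize adjustment itself:

import cv2

# Same envelope-times-carrier form as above:
# exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2)) * cos(2 * pi * x' / lambd + psi)
kernel = cv2.getGaborKernel(ksize=(11, 11), sigma=8, theta=0.0, lambd=10, gamma=0, psi=0)
print(kernel.shape)  # (11, 11)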
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _snake_case : List[str] = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE__ =AlbertTokenizer SCREAMING_SNAKE_CASE__ =AlbertTokenizerFast SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =True def __lowerCAmelCase ( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = AlbertTokenizer(_a ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self, _a ) -> Dict: __SCREAMING_SNAKE_CASE = "this is a test" __SCREAMING_SNAKE_CASE = "this is a test" return input_text, output_text def __lowerCAmelCase ( self ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = "<pad>" __SCREAMING_SNAKE_CASE = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ), _a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ), _a ) def __lowerCAmelCase ( self ) -> List[Any]: __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], "<pad>" ) self.assertEqual(vocab_keys[1], "<unk>" ) self.assertEqual(vocab_keys[-1], "โ–eloquent" ) self.assertEqual(len(_a ), 3_00_00 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self.assertEqual(self.get_tokenizer().vocab_size, 3_00_00 ) def __lowerCAmelCase ( self ) -> Any: if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = "I was born in 92000, and this is falsรฉ." __SCREAMING_SNAKE_CASE = tokenizer.tokenize(_a ) __SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a, _a ) __SCREAMING_SNAKE_CASE = tokenizer.encode(_a, add_special_tokens=_a ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(_a, add_special_tokens=_a ) self.assertListEqual(_a, _a ) __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = tokenizer.encode(_a ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(_a ) self.assertListEqual(_a, _a ) def __lowerCAmelCase ( self ) -> Optional[int]: __SCREAMING_SNAKE_CASE = AlbertTokenizer(_a, keep_accents=_a ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("This is a test" ) self.assertListEqual(_a, ["โ–this", "โ–is", "โ–a", "โ–test"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ), [48, 25, 21, 12_89] ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("I was born in 92000, and this is falsรฉ." 
) self.assertListEqual( _a, ["โ–i", "โ–was", "โ–born", "โ–in", "โ–9", "2000", ",", "โ–and", "โ–this", "โ–is", "โ–fal", "s", "รฉ", "."] ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(_a ) self.assertListEqual(_a, [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(_a ) self.assertListEqual( _a, ["โ–i", "โ–was", "โ–born", "โ–in", "โ–9", "2000", ",", "โ–and", "โ–this", "โ–is", "โ–fal", "s", "<unk>", "."], ) def __lowerCAmelCase ( self ) -> Optional[int]: __SCREAMING_SNAKE_CASE = AlbertTokenizer(_a ) __SCREAMING_SNAKE_CASE = tokenizer.encode("sequence builders" ) __SCREAMING_SNAKE_CASE = tokenizer.encode("multi-sequence build" ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_a ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_a, _a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def __lowerCAmelCase ( self ) -> Optional[int]: # fmt: off __SCREAMING_SNAKE_CASE = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_a, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
693
def _A ( __snake_case :int ) -> int: """simple docstring""" assert isinstance(__snake_case , __snake_case ), f'''The input value of [n={number}] is not an integer''' if number == 1: return 2 elif number < 1: __SCREAMING_SNAKE_CASE = f'''The input value of [n={number}] has to be > 0''' raise ValueError(__snake_case ) else: __SCREAMING_SNAKE_CASE = sylvester(number - 1 ) __SCREAMING_SNAKE_CASE = num - 1 __SCREAMING_SNAKE_CASE = num return lower * upper + 1 if __name__ == "__main__": print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
693
1
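The recursive sylvester sample above implements the recurrence a(n) = a(n-1) * (a(n-1) - 1) + 1 with a(1) = 2. A quick iterative check of the first terms (2, 3, 7, 43, 1807):

def sylvester_terms(count: int) -> list:
    terms, term = [], 2
    for _ in range(count):
        terms.append(term)
        term = term * (term - 1) + 1  # lower * upper + 1 in the recursive version
    return terms

assert sylvester_terms(5) == [2, 3, 7, 43, 1807]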
from __future__ import annotations def _A ( __snake_case :list , __snake_case :int ) -> List[str]: """simple docstring""" if len(__snake_case ) <= 1 or n <= 1: return insert_next(__snake_case , n - 1 ) rec_insertion_sort(__snake_case , n - 1 ) def _A ( __snake_case :list , __snake_case :int ) -> Optional[Any]: """simple docstring""" if index >= len(__snake_case ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( collection[index], collection[index - 1], ) insert_next(__snake_case , index + 1 ) if __name__ == "__main__": _snake_case : str = input('Enter integers separated by spaces: ') _snake_case : list[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
693
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]: pass @is_pipeline_test @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @require_torch def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(_a ), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], ], ) @require_tf def __lowerCAmelCase ( self ) -> Any: __SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], ], ) @slow @require_torch def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = 
image_classifier(_a, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(_a ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(_a ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, )
693
1
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = ["a", "b", "c"] # Defaults to last layer if both are None __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(_a, _a, _a ) self.assertEqual(_a, ["c"] ) self.assertEqual(_a, [2] ) # Out indices set to match out features __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(["a", "c"], _a, _a ) self.assertEqual(_a, ["a", "c"] ) self.assertEqual(_a, [0, 2] ) # Out features set to match out indices __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(_a, [0, 2], _a ) self.assertEqual(_a, ["a", "c"] ) self.assertEqual(_a, [0, 2] ) # Out features selected from negative indices __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(_a, [-3, -1], _a ) self.assertEqual(_a, ["a", "c"] ) self.assertEqual(_a, [-3, -1] ) def __lowerCAmelCase ( self ) -> int: # Stage names must be set with self.assertRaises(_a ): verify_out_features_out_indices(["a", "b"], (0, 1), _a ) # Out features must be a list with self.assertRaises(_a ): verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"] ) # Out features must be a subset of stage names with self.assertRaises(_a ): verify_out_features_out_indices(["a", "b"], (0, 1), ["a"] ) # Out indices must be a list or tuple with self.assertRaises(_a ): verify_out_features_out_indices(_a, 0, ["a", "b"] ) # Out indices must be a subset of stage names with self.assertRaises(_a ): verify_out_features_out_indices(_a, (0, 1), ["a"] ) # Out features and out indices must be the same length with self.assertRaises(_a ): verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"] ) # Out features should match out indices with self.assertRaises(_a ): verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"] ) # Out features and out indices should be in order with self.assertRaises(_a ): verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"] ) # Check passes with valid inputs verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"] ) def __lowerCAmelCase ( self ) -> int: __SCREAMING_SNAKE_CASE = BackboneMixin() __SCREAMING_SNAKE_CASE = ["a", "b", "c"] __SCREAMING_SNAKE_CASE = ["a", "c"] __SCREAMING_SNAKE_CASE = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features, ["a", "c"] ) self.assertEqual(backbone.out_indices, [0, 2] ) # Check out features and indices are updated correctly __SCREAMING_SNAKE_CASE = ["a", "b"] self.assertEqual(backbone.out_features, ["a", "b"] ) self.assertEqual(backbone.out_indices, [0, 1] ) __SCREAMING_SNAKE_CASE = [-3, -1] self.assertEqual(backbone.out_features, ["a", "c"] ) self.assertEqual(backbone.out_indices, [-3, -1] )
693
from __future__ import annotations import math def _A ( __snake_case :int , __snake_case :int , __snake_case :bool , __snake_case :list[int] , __snake_case :float ) -> int: """simple docstring""" if depth < 0: raise ValueError("Depth cannot be less than 0" ) if len(__snake_case ) == 0: raise ValueError("Scores cannot be empty" ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) return min( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) def _A ( ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = [90, 23, 6, 33, 21, 65, 123, 3_4423] __SCREAMING_SNAKE_CASE = math.log(len(__snake_case ) , 2 ) print("Optimal value : " , end="" ) print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
693
1
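The minimax sample above evaluates a complete binary game tree. A self-contained restatement with readable names; for the leaf scores used in its main(), the root value works out to max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423))) = max(33, 65) = 65:

import math

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = int(math.log2(len(scores)))  # 3 levels above the leaves

def minimax(depth: int, node: int, is_max: bool) -> int:
    if depth == height:
        return scores[node]
    left = minimax(depth + 1, node * 2, not is_max)
    right = minimax(depth + 1, node * 2 + 1, not is_max)
    return max(left, right) if is_max else min(left, right)

assert minimax(0, 0, True) == 65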
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 _snake_case : Dict = get_tests_dir('fixtures/dummy_feature_extractor_config.json') _snake_case : str = get_tests_dir('fixtures/vocab.json') _snake_case : Dict = get_tests_dir('fixtures') class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): SCREAMING_SNAKE_CASE__ =["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] def __lowerCAmelCase ( self ) -> int: __SCREAMING_SNAKE_CASE = 0 def __lowerCAmelCase ( self ) -> int: __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(_a, _a ) def __lowerCAmelCase ( self ) -> str: with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = WavaVecaConfig() __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" ) # save in new folder model_config.save_pretrained(_a ) processor.save_pretrained(_a ) __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(_a ) self.assertIsInstance(_a, _a ) def __lowerCAmelCase ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(_a, os.path.join(_a, _a ) ) copyfile(_a, os.path.join(_a, "vocab.json" ) ) __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(_a ) self.assertIsInstance(_a, _a ) def __lowerCAmelCase ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor() __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" ) __SCREAMING_SNAKE_CASE = WavaVecaProcessor(_a, _a ) # save in new folder processor.save_pretrained(_a ) # drop `processor_class` in tokenizer with open(os.path.join(_a, _a ), "r" ) as f: __SCREAMING_SNAKE_CASE = json.load(_a ) config_dict.pop("processor_class" ) with open(os.path.join(_a, _a ), "w" ) as f: f.write(json.dumps(_a ) ) __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(_a ) self.assertIsInstance(_a, _a ) def __lowerCAmelCase ( self ) -> List[Any]: with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor() __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" ) __SCREAMING_SNAKE_CASE = WavaVecaProcessor(_a, _a ) # save in new folder processor.save_pretrained(_a ) # drop `processor_class` in feature extractor with open(os.path.join(_a, _a ), "r" ) as f: __SCREAMING_SNAKE_CASE = json.load(_a ) config_dict.pop("processor_class" ) with 
open(os.path.join(_a, _a ), "w" ) as f: f.write(json.dumps(_a ) ) __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(_a ) self.assertIsInstance(_a, _a ) def __lowerCAmelCase ( self ) -> List[Any]: with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = WavaVecaConfig(processor_class="Wav2Vec2Processor" ) model_config.save_pretrained(_a ) # copy relevant files copyfile(_a, os.path.join(_a, "vocab.json" ) ) # create emtpy sample processor with open(os.path.join(_a, _a ), "w" ) as f: f.write("{}" ) __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(_a ) self.assertIsInstance(_a, _a ) def __lowerCAmelCase ( self ) -> Any: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_a ): __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(_a ): __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor", trust_remote_code=_a ) __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=_a ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__, "NewProcessor" ) __SCREAMING_SNAKE_CASE = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor" ) __SCREAMING_SNAKE_CASE = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast" ) # Test we can also load the slow version __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor", trust_remote_code=_a, use_fast=_a ) __SCREAMING_SNAKE_CASE = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer" ) else: self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer" ) def __lowerCAmelCase ( self ) -> Dict: try: AutoConfig.register("custom", _a ) AutoFeatureExtractor.register(_a, _a ) AutoTokenizer.register(_a, slow_tokenizer_class=_a ) AutoProcessor.register(_a, _a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_a ): AutoProcessor.register(_a, _a ) # Now that the config is registered, it can be used as any other config with the auto-API __SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(_a ) with tempfile.TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE = os.path.join(_a, "vocab.txt" ) with open(_a, "w", encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) __SCREAMING_SNAKE_CASE = CustomTokenizer(_a ) __SCREAMING_SNAKE_CASE = CustomProcessor(_a, _a ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(_a ) __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(_a ) self.assertIsInstance(_a, _a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del 
PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Tuple: class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =False class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =False class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ ="""AutoFeatureExtractor""" SCREAMING_SNAKE_CASE__ ="""AutoTokenizer""" SCREAMING_SNAKE_CASE__ =False try: AutoConfig.register("custom", _a ) AutoFeatureExtractor.register(_a, _a ) AutoTokenizer.register(_a, slow_tokenizer_class=_a ) AutoProcessor.register(_a, _a ) # If remote code is not set, the default is to use local classes. __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" ) self.assertEqual(processor.__class__.__name__, "NewProcessor" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor", trust_remote_code=_a ) self.assertEqual(processor.__class__.__name__, "NewProcessor" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor", trust_remote_code=_a ) self.assertEqual(processor.__class__.__name__, "NewProcessor" ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Dict: __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(processor.__class__.__name__, "BertTokenizerFast" ) def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" ) self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor" ) @is_staging_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): SCREAMING_SNAKE_CASE__ =["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def __lowerCAmelCase ( cls ) -> Dict: __SCREAMING_SNAKE_CASE = TOKEN HfFolder.save_token(_a ) @classmethod def __lowerCAmelCase ( cls ) -> List[Any]: try: delete_repo(token=cls._token, repo_id="test-processor" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-processor" ) except HTTPError: pass def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = WavaVecaProcessor.from_pretrained(_a ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( 
os.path.join(_a, "test-processor" ), push_to_hub=_a, use_auth_token=self._token ) __SCREAMING_SNAKE_CASE = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(_a, getattr(new_processor.feature_extractor, _a ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = WavaVecaProcessor.from_pretrained(_a ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(_a, "test-processor-org" ), push_to_hub=_a, use_auth_token=self._token, organization="valid_org", ) __SCREAMING_SNAKE_CASE = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(_a, getattr(new_processor.feature_extractor, _a ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Dict: CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() __SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(_a ) with tempfile.TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE = os.path.join(_a, "vocab.txt" ) with open(_a, "w", encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) __SCREAMING_SNAKE_CASE = CustomTokenizer(_a ) __SCREAMING_SNAKE_CASE = CustomProcessor(_a, _a ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f'''{USER}/test-dynamic-processor''', token=self._token ) __SCREAMING_SNAKE_CASE = Repository(_a, clone_from=f'''{USER}/test-dynamic-processor''', token=self._token ) processor.save_pretrained(_a ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map, { "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor", "AutoProcessor": "custom_processing.CustomProcessor", }, ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(_a, "tokenizer_config.json" ) ) as f: __SCREAMING_SNAKE_CASE = json.load(_a ) self.assertDictEqual( tokenizer_config["auto_map"], { "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None], "AutoProcessor": "custom_processing.CustomProcessor", }, ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(_a, "custom_feature_extraction.py" ) ) ) self.assertTrue(os.path.isfile(os.path.join(_a, "custom_tokenization.py" ) ) ) self.assertTrue(os.path.isfile(os.path.join(_a, "custom_processing.py" ) ) ) repo.push_to_hub() __SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''', trust_remote_code=_a ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__, "CustomProcessor" )
693
def _A ( __snake_case :bytes ) -> str: """simple docstring""" return "".join([hex(__snake_case )[2:].zfill(2 ).upper() for byte in list(__snake_case )] ) def _A ( __snake_case :str ) -> bytes: """simple docstring""" if (len(__snake_case ) % 2) != 0: raise ValueError( "Base16 encoded data is invalid:\nData does not have an even number of hex digits." ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(__snake_case ) <= set("0123456789ABCDEF" ): raise ValueError( "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(__snake_case ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
693
1
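The base16 helpers above mirror the standard library's uppercase codec from the same RFC 3548 the sample's comment cites. A cross-check sketch using base64 from the standard library:

import base64

# 'H' = 0x48, 'e' = 0x65, 'l' = 0x6C, 'l' = 0x6C, 'o' = 0x6F
assert base64.b16encode(b"Hello") == b"48656C6C6F"
assert base64.b16decode("48656C6C6F") == b"Hello"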
def _A ( num_a :int , num_b :int ) -> bool: """Return True when the two integers have opposite signs: the XOR of two ints is negative exactly when their sign bits differ.""" return num_a ^ num_b < 0 if __name__ == "__main__": import doctest doctest.testmod()
693
from functools import lru_cache def _A ( __snake_case :int ) -> set: """simple docstring""" __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__snake_case ) if n > 1: factors.add(__snake_case ) return factors @lru_cache def _A ( __snake_case :int ) -> int: """simple docstring""" return len(unique_prime_factors(__snake_case ) ) def _A ( __snake_case :list ) -> bool: """simple docstring""" return len(set(__snake_case ) ) in (0, 1) def _A ( __snake_case :int ) -> list: """simple docstring""" __SCREAMING_SNAKE_CASE = 2 while True: # Increment each value of a generated range __SCREAMING_SNAKE_CASE = [base + i for i in range(__snake_case )] # Run elements through out unique_prime_factors function # Append our target number to the end. __SCREAMING_SNAKE_CASE = [upf_len(__snake_case ) for x in group] checker.append(__snake_case ) # If all numbers in the list are equal, return the group variable. if equality(__snake_case ): return group # Increment our base variable by 1 base += 1 def _A ( __snake_case :int = 4 ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = run(__snake_case ) return results[0] if len(__snake_case ) else None if __name__ == "__main__": print(solution())
693
1
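The unique-prime-factor sample above searches for consecutive integers with a fixed number of distinct prime factors (Project Euler 47). A self-contained check of the idea on the problem's own example; distinct_prime_factors below is a hypothetical helper written here for illustration:

def distinct_prime_factors(n: int) -> set:
    # Trial division collecting each prime factor once.
    factors, i = set(), 2
    while i * i <= n:
        while n % i == 0:
            factors.add(i)
            n //= i
        i += 1
    if n > 1:
        factors.add(n)
    return factors

# 644 = 2**2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19
assert all(len(distinct_prime_factors(n)) == 3 for n in (644, 645, 646))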
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    # Map every HF Diffusers key back to its Stable Diffusion name.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
693
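The conversion tables above can be spot-checked without loading any weights; a minimal sketch, with both tuples following directly from the loop bounds in the script:

# The first resnet of the first down block maps to input_blocks.1.0 in the
# original Stable Diffusion layout, and the mid-block attention to middle_block.1.
assert ("input_blocks.1.0.", "down_blocks.0.resnets.0.") in unet_conversion_map_layer
assert ("middle_block.1.", "mid_block.attentions.0.") in unet_conversion_map_layer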
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')


def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


# We will verify our results on a video of eating spaghetti
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
693
1
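A small spot-check of the key-renaming logic above; both expected strings follow directly from the replacement chain in rename_key():

# rename_key() examples for the VideoMAE converter above.
assert rename_key("patch_embed.proj.weight") == "videomae.embeddings.patch_embeddings.projection.weight"
assert rename_key("decoder.blocks.0.norm1.weight") == "decoder.decoder_layers.0.layernorm_before.weight"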
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": (
        "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json"
    ),
}


class BertAbsConfig(PretrainedConfig):
    """Configuration class for the BertAbs abstractive summarization model."""

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
693
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
693
1
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extracts text nodes and their XPaths from HTML strings, using BeautifulSoup."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts) -> str:
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
693
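A minimal usage sketch for the feature extractor above, assuming bs4 is installed:

feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
print(encoding["nodes"])   # [['Hello world']]
print(encoding["xpaths"])  # [['/html/body/p']]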
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (all divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Sum all amicable numbers below n: i is amicable when
    sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
693
1
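The classic amicable pair gives a quick check of sum_of_divisors() above:

assert sum_of_divisors(220) == 284  # 1+2+4+5+10+11+20+22+44+55+110
assert sum_of_divisors(284) == 220  # 1+2+4+71+142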
import sys

"""
Dynamic programming implementation of matrix chain multiplication.
Time complexity: O(n^3); space complexity: O(n^2).
"""


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


# Print the optimal parenthesization, with Ai denoting the i-th matrix
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
693
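A worked example for matrix_chain_order() above with the textbook dimension list [40, 20, 30, 10, 30]: the optimal parenthesization (A1(A2A3))A4 costs 20*30*10 + 40*20*10 + 40*10*30 = 26000 scalar multiplications.

matrix, sol = matrix_chain_order([40, 20, 30, 10, 30])
assert matrix[1][4] == 26000  # minimal multiplication count for A1..A4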
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law: V = nRT / P, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law: T = PV / (nR), with R = 0.0821 L*atm/(mol*K)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
693
1
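A worked number for moles_to_pressure() above: P = nRT / V = (3 * 0.0821 * 300) / 0.82 is about 90.1, which the function rounds to 90.

assert moles_to_pressure(volume=0.82, moles=3, temperature=300) == 90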
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
693
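With the lazy module above in place, nothing under modeling_ernie is imported until an attribute is first touched; a minimal sketch, assuming torch is installed:

from transformers import ErnieConfig, ErnieModel  # names resolved lazily

model = ErnieModel(ErnieConfig())  # first access triggers the real import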
import tempfile
import unittest

from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
693
1
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    Each state has exactly two children: exclude or include the current element.
    It terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
693
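For ["A", "B"] the backtracking above always explores the exclude-branch first, so the printed order is deterministic:

generate_all_subsequences(["A", "B"])
# []
# ['B']
# ['A']
# ['A', 'B']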
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel PyTorch model instance to be converted
        ckpt_dir: TensorFlow model directory
        model_name: model name
    """

    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
693
1
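A hypothetical programmatic invocation of the converter above; the paths are placeholders, and a TensorFlow 1.x-style tf.Session API must be available in the environment:

from transformers import BertModel

model = BertModel.from_pretrained("bert-base-uncased")
convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir="./tf_ckpt", model_name="bert-base-uncased")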
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
693
from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
693
1
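A usage sketch for the feature extractor above with one second of silent mono audio; with no chunking configured, the padded batch keeps its full length:

import numpy as np

feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
audio = np.zeros(24000, dtype=np.float32)  # 1 s of silence
inputs = feature_extractor(audio, sampling_rate=24000, return_tensors="pt")
print(inputs["input_values"].shape)  # torch.Size([1, 1, 24000])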
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
693
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the score-based SDE (VE) model."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
693
1
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> Any: torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model def __lowerCAmelCase ( self ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.dummy_uncond_unet __SCREAMING_SNAKE_CASE = ScoreSdeVeScheduler() __SCREAMING_SNAKE_CASE = ScoreSdeVePipeline(unet=_a, scheduler=_a ) sde_ve.to(_a ) sde_ve.set_progress_bar_config(disable=_a ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sde_ve(num_inference_steps=2, output_type="numpy", generator=_a ).images __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sde_ve(num_inference_steps=2, output_type="numpy", generator=_a, return_dict=_a )[ 0 ] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Any: __SCREAMING_SNAKE_CASE = "google/ncsnpp-church-256" __SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(_a ) __SCREAMING_SNAKE_CASE = ScoreSdeVeScheduler.from_pretrained(_a ) __SCREAMING_SNAKE_CASE = ScoreSdeVePipeline(unet=_a, scheduler=_a ) sde_ve.to(_a ) sde_ve.set_progress_bar_config(disable=_a ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sde_ve(num_inference_steps=10, output_type="numpy", generator=_a ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
693
def solution(n: int = 4_000_000) -> int:
    """simple docstring"""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
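# Quick sanity checks for solution(); the even Fibonacci numbers up to 100
# are 2, 8 and 34:
assert solution(10) == 10   # 2 + 8
assert solution(100) == 44  # 2 + 8 + 34
print(solution())           # 4613732 for the default 4,000,000 bound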
693
1
from maths.prime_check import is_prime


def _A(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
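# A minimal, self-contained check; this local is_prime is a stand-in
# (an assumption) for maths.prime_check.is_prime:
def is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))


assert _A(5) == 7    # (5, 7) is a twin prime pair
assert _A(11) == 13  # (11, 13) is a twin prime pair
assert _A(23) == -1  # 25 = 5 * 5, so 23 has no twin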
693
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr):
        next_item = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """simple docstring"""
    arr_size = len(arr)
    stack = []
    result = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
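# Worked example: for each element, find the next element to its right that
# is strictly larger, or -1 if none exists. All three variants must agree:
sample = [2, 7, 3, 5, 1]
assert next_greatest_element(sample) == [7, -1, 5, -1, -1]
assert next_greatest_element(sample) == next_greatest_element_slow(sample)
assert next_greatest_element(sample) == next_greatest_element_fast(sample)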
693
1
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """simple docstring"""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
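# NOR is functionally complete; for instance NOT falls out by tying both
# inputs together (a small illustration, not part of the original module):
def not_gate(input_1: int) -> int:
    return nor_gate(input_1, input_1)


assert not_gate(0) == 1
assert not_gate(1) == 0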
693
from typing import Any class __SCREAMING_SNAKE_CASE : def __init__( self, _a ) -> Any: __SCREAMING_SNAKE_CASE = data __SCREAMING_SNAKE_CASE = None def __repr__( self ) -> str: return f'''Node({self.data})''' class __SCREAMING_SNAKE_CASE : def __init__( self ) -> Tuple: __SCREAMING_SNAKE_CASE = None def __iter__( self ) -> Any: __SCREAMING_SNAKE_CASE = self.head while node: yield node.data __SCREAMING_SNAKE_CASE = node.next def __len__( self ) -> int: return sum(1 for _ in self ) def __repr__( self ) -> str: return "->".join([str(_a ) for item in self] ) def __getitem__( self, _a ) -> Any: if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self, _a, _a ) -> None: if not 0 <= index < len(self ): raise ValueError("list index out of range." ) __SCREAMING_SNAKE_CASE = self.head for _ in range(_a ): __SCREAMING_SNAKE_CASE = current.next __SCREAMING_SNAKE_CASE = data def __lowerCAmelCase ( self, _a ) -> None: self.insert_nth(len(self ), _a ) def __lowerCAmelCase ( self, _a ) -> None: self.insert_nth(0, _a ) def __lowerCAmelCase ( self, _a, _a ) -> None: if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) __SCREAMING_SNAKE_CASE = Node(_a ) if self.head is None: __SCREAMING_SNAKE_CASE = new_node elif index == 0: __SCREAMING_SNAKE_CASE = self.head # link new_node to head __SCREAMING_SNAKE_CASE = new_node else: __SCREAMING_SNAKE_CASE = self.head for _ in range(index - 1 ): __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = new_node def __lowerCAmelCase ( self ) -> None: # print every node data print(self ) def __lowerCAmelCase ( self ) -> Any: return self.delete_nth(0 ) def __lowerCAmelCase ( self ) -> Any: # delete from tail return self.delete_nth(len(self ) - 1 ) def __lowerCAmelCase ( self, _a = 0 ) -> Any: if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." ) __SCREAMING_SNAKE_CASE = self.head # default first node if index == 0: __SCREAMING_SNAKE_CASE = self.head.next else: __SCREAMING_SNAKE_CASE = self.head for _ in range(index - 1 ): __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = temp.next.next return delete_node.data def __lowerCAmelCase ( self ) -> bool: return self.head is None def __lowerCAmelCase ( self ) -> None: __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = self.head while current: # Store the current node's next node. __SCREAMING_SNAKE_CASE = current.next # Make the current node's next point backwards __SCREAMING_SNAKE_CASE = prev # Make the previous node be the current node __SCREAMING_SNAKE_CASE = current # Make the current node the next node (to progress iteration) __SCREAMING_SNAKE_CASE = next_node # Return prev in order to put the head at the end __SCREAMING_SNAKE_CASE = prev def _A ( ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = LinkedList() assert linked_list.is_empty() is True assert str(__snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(__snake_case ) == i linked_list.insert_nth(__snake_case , i + 1 ) assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__snake_case ) == 9 assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __SCREAMING_SNAKE_CASE = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) ) def _A ( ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = [ -9, 100, Node(7734_5112 ), "dlrow olleH", 7, 5555, 0, -1_9_2.5_5_5_5_5, "Hello, world!", 7_7.9, Node(10 ), None, None, 1_2.2_0, ] __SCREAMING_SNAKE_CASE = LinkedList() for i in test_input: linked_list.insert_tail(__snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __SCREAMING_SNAKE_CASE = linked_list.delete_head() assert result == -9 assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __SCREAMING_SNAKE_CASE = linked_list.delete_tail() assert result == 1_2.2 assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __SCREAMING_SNAKE_CASE = linked_list.delete_nth(10 ) assert result is None assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!" 
) ) assert ( str(__snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__snake_case ) assert ( str(__snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _A ( ) -> Union[str, Any]: """simple docstring""" from doctest import testmod testmod() __SCREAMING_SNAKE_CASE = LinkedList() linked_list.insert_head(input("Inserting 1st at head " ).strip() ) linked_list.insert_head(input("Inserting 2nd at head " ).strip() ) print("\nPrint list:" ) linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() ) linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() ) print("\nPrint list:" ) linked_list.print_list() print("\nDelete head" ) linked_list.delete_head() print("Delete tail" ) linked_list.delete_tail() print("\nPrint list:" ) linked_list.print_list() print("\nReverse linked list" ) linked_list.reverse() print("\nPrint list:" ) linked_list.print_list() print("\nString representation of linked list:" ) print(__snake_case ) print("\nReading/changing Node data using indexing:" ) print(f'''Element at Position 1: {linked_list[1]}''' ) __SCREAMING_SNAKE_CASE = input("Enter New Value: " ).strip() print("New list:" ) print(__snake_case ) print(f'''length of linked_list is : {len(__snake_case )}''' ) if __name__ == "__main__": main()
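# A compact usage sketch of the list above (reading the obfuscated
# assignments as the names their later uses imply):
ll = LinkedList()
for value in (3, 1, 2):
    ll.insert_tail(value)
assert list(ll) == [3, 1, 2]
ll.reverse()
assert list(ll) == [2, 1, 3]
assert ll.delete_head() == 2
assert len(ll) == 2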
693
1
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =(DEISMultistepScheduler,) SCREAMING_SNAKE_CASE__ =(("""num_inference_steps""", 25),) def __lowerCAmelCase ( self, **_a ) -> str: __SCREAMING_SNAKE_CASE = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, } config.update(**_a ) return config def __lowerCAmelCase ( self, _a=0, **_a ) -> int: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a ) __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals __SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) __SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a ) new_scheduler.set_timesteps(_a ) # copy over dummy past residuals __SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sample, sample for t in range(_a, time_step + scheduler.config.solver_order + 1 ): __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self ) -> int: pass def __lowerCAmelCase ( self, _a=0, **_a ) -> List[Any]: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals (must be after setting timesteps) __SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) __SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a ) # copy over dummy past residuals new_scheduler.set_timesteps(_a ) # copy over dummy past residual (must be after setting timesteps) __SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self, _a=None, **_a ) -> Tuple: if scheduler is None: __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a ) __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a ) 
__SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter scheduler.set_timesteps(_a ) for i, t in enumerate(scheduler.timesteps ): __SCREAMING_SNAKE_CASE = model(_a, _a ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample return sample def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample if num_inference_steps is not None and hasattr(_a, "set_timesteps" ): scheduler.set_timesteps(_a ) elif num_inference_steps is not None and not hasattr(_a, "set_timesteps" ): __SCREAMING_SNAKE_CASE = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.10] __SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order] __SCREAMING_SNAKE_CASE = scheduler.timesteps[5] __SCREAMING_SNAKE_CASE = scheduler.timesteps[6] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def __lowerCAmelCase ( self ) -> Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults __SCREAMING_SNAKE_CASE = DEISMultistepScheduler(**self.get_scheduler_config() ) __SCREAMING_SNAKE_CASE = self.full_loop(scheduler=_a ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 __SCREAMING_SNAKE_CASE = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(scheduler.config ) __SCREAMING_SNAKE_CASE = UniPCMultistepScheduler.from_config(scheduler.config ) __SCREAMING_SNAKE_CASE = DEISMultistepScheduler.from_config(scheduler.config ) __SCREAMING_SNAKE_CASE = self.full_loop(scheduler=_a ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 def __lowerCAmelCase ( self ) -> Optional[Any]: for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=_a ) def __lowerCAmelCase ( self ) -> str: self.check_over_configs(thresholding=_a ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_a, prediction_type=_a, sample_max_value=_a, algorithm_type="deis", solver_order=_a, solver_type=_a, ) def __lowerCAmelCase ( self ) -> int: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def __lowerCAmelCase ( self ) -> Optional[Any]: for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_a, solver_type=_a, prediction_type=_a, algorithm_type=_a, ) __SCREAMING_SNAKE_CASE = self.full_loop( solver_order=_a, solver_type=_a, prediction_type=_a, algorithm_type=_a, ) assert not torch.isnan(_a ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self ) -> Optional[int]: 
self.check_over_configs(lower_order_final=_a ) self.check_over_configs(lower_order_final=_a ) def __lowerCAmelCase ( self ) -> Optional[int]: for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=_a, time_step=0 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.full_loop() __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 def __lowerCAmelCase ( self ) -> Dict: __SCREAMING_SNAKE_CASE = self.full_loop(prediction_type="v_prediction" ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.091 ) < 1E-3 def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config(thresholding=_a, dynamic_thresholding_ratio=0 ) __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter.half() scheduler.set_timesteps(_a ) for i, t in enumerate(scheduler.timesteps ): __SCREAMING_SNAKE_CASE = model(_a, _a ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample assert sample.dtype == torch.floataa
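# A stripped-down version of what full_loop() above exercises, against the
# public diffusers API; the constant "model output" is a placeholder for a
# trained UNet's prediction:
import torch
from diffusers import DEISMultistepScheduler

scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = 0.1 * sample  # stand-in for unet(sample, t).sample
    sample = scheduler.step(model_output, t, sample).prev_sample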
693
import argparse
import json

from tqdm import tqdm


def main() -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
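# Shape of one input record, reduced to the fields the script reads
# (the question text and titles here are made up for illustration):
record = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [{"title": "Thomas Jefferson"}],
}
# evaluation_set line: "who wrote the declaration of independence"
# gold_data_path line: "Thomas Jefferson"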
693
1
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """simple docstring"""
    rotation_mat = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_mat, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list (the exact point pairings were lost in
    # the variable renaming; consecutive pairs are assumed here)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
693
def solution(max_perimeter: int = 10**9) -> int:
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
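# The smallest almost-equilateral triangles with integral sides and area are
# (5, 5, 6) with perimeter 16 and (17, 17, 16) with perimeter 50, so:
assert solution(15) == 0
assert solution(16) == 16
assert solution(50) == 16 + 50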
693
1
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =42 SCREAMING_SNAKE_CASE__ =42 def __init__( self, _a, _a ) -> Dict: super().__init__() self.register_modules(unet=_a, scheduler=_a ) @torch.no_grad() def __call__( self, _a = 1, _a = 20_00, _a = None, _a = "pil", _a = True, **_a, ) -> Union[ImagePipelineOutput, Tuple]: __SCREAMING_SNAKE_CASE = self.unet.config.sample_size __SCREAMING_SNAKE_CASE = (batch_size, 3, img_size, img_size) __SCREAMING_SNAKE_CASE = self.unet __SCREAMING_SNAKE_CASE = randn_tensor(_a, generator=_a ) * self.scheduler.init_noise_sigma __SCREAMING_SNAKE_CASE = sample.to(self.device ) self.scheduler.set_timesteps(_a ) self.scheduler.set_sigmas(_a ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): __SCREAMING_SNAKE_CASE = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): __SCREAMING_SNAKE_CASE = self.unet(_a, _a ).sample __SCREAMING_SNAKE_CASE = self.scheduler.step_correct(_a, _a, generator=_a ).prev_sample # prediction step __SCREAMING_SNAKE_CASE = model(_a, _a ).sample __SCREAMING_SNAKE_CASE = self.scheduler.step_pred(_a, _a, _a, generator=_a ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output.prev_sample, output.prev_sample_mean __SCREAMING_SNAKE_CASE = sample_mean.clamp(0, 1 ) __SCREAMING_SNAKE_CASE = sample.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": __SCREAMING_SNAKE_CASE = self.numpy_to_pil(_a ) if not return_dict: return (sample,) return ImagePipelineOutput(images=_a )
693
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _snake_case , _snake_case , _snake_case : List[Any] = False, False, False @dataclass class __SCREAMING_SNAKE_CASE : SCREAMING_SNAKE_CASE__ =None SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =None # Automatically constructed SCREAMING_SNAKE_CASE__ ="dict" SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __call__( self ) -> Optional[int]: return self.pa_type def __lowerCAmelCase ( self, _a ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(_a, _a ): return {"bytes": None, "path": value} elif isinstance(_a, _a ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes __SCREAMING_SNAKE_CASE = BytesIO() sf.write(_a, value["array"], value["sampling_rate"], format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) __SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: __SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67 __SCREAMING_SNAKE_CASE = BytesIO(bytes() ) sf.write(_a, _a, value["sampling_rate"], format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def __lowerCAmelCase ( self, _a, _a = None ) -> dict: if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." 
) from err __SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: __SCREAMING_SNAKE_CASE = token_per_repo_id or {} __SCREAMING_SNAKE_CASE = path.split("::" )[-1] try: __SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"] __SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id] except (ValueError, KeyError): __SCREAMING_SNAKE_CASE = None with xopen(_a, "rb", use_auth_token=_a ) as f: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a ) __SCREAMING_SNAKE_CASE = array.T if self.mono: __SCREAMING_SNAKE_CASE = librosa.to_mono(_a ) if self.sampling_rate and self.sampling_rate != sampling_rate: __SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate ) __SCREAMING_SNAKE_CASE = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." ) return { "bytes": Value("binary" ), "path": Value("string" ), } def __lowerCAmelCase ( self, _a ) -> pa.StructArray: if pa.types.is_string(storage.type ): __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): __SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: __SCREAMING_SNAKE_CASE = storage.field("bytes" ) else: __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: __SCREAMING_SNAKE_CASE = storage.field("path" ) else: __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() ) return array_cast(_a, self.pa_type ) def __lowerCAmelCase ( self, _a ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(_a ): with xopen(_a, "rb" ) as f: __SCREAMING_SNAKE_CASE = f.read() return bytes_ __SCREAMING_SNAKE_CASE = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) __SCREAMING_SNAKE_CASE = pa.array( [os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], 
["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(_a, self.pa_type )
693
1
# limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class lowerCamelCase_ ( lowerCamelCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" super().__init__() self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) @torch.no_grad() def __call__( self , __lowerCAmelCase = 1 , __lowerCAmelCase = None , __lowerCAmelCase = 5_0 , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , **__lowerCAmelCase , ): """simple docstring""" __magic_name__ :List[str] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__lowerCAmelCase , ) __magic_name__ :Union[str, Any] = image.to(self.device ) # set step values self.scheduler.set_timesteps(__lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __magic_name__ :Union[str, Any] = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to ฮท in paper and should be between [0, 1] # do x_t -> x_t-1 __magic_name__ :List[str] = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample __magic_name__ :Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 ) __magic_name__ :int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __magic_name__ :Dict = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=__lowerCAmelCase ), "This is a local test"
0
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =(IPNDMScheduler,) SCREAMING_SNAKE_CASE__ =(("""num_inference_steps""", 50),) def __lowerCAmelCase ( self, **_a ) -> str: __SCREAMING_SNAKE_CASE = {"num_train_timesteps": 10_00} config.update(**_a ) return config def __lowerCAmelCase ( self, _a=0, **_a ) -> List[Any]: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a ) __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] if time_step is None: __SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) __SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a ) new_scheduler.set_timesteps(_a ) # copy over dummy past residuals __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self ) -> str: pass def __lowerCAmelCase ( self, _a=0, **_a ) -> int: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals (must be after setting timesteps) __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] if time_step is None: __SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) __SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a ) # copy over dummy past residuals new_scheduler.set_timesteps(_a ) # copy over dummy past residual (must be after setting timesteps) __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self, **_a ) -> Tuple: __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] 
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a ) __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter scheduler.set_timesteps(_a ) for i, t in enumerate(scheduler.timesteps ): __SCREAMING_SNAKE_CASE = model(_a, _a ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample for i, t in enumerate(scheduler.timesteps ): __SCREAMING_SNAKE_CASE = model(_a, _a ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample return sample def __lowerCAmelCase ( self ) -> Optional[int]: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample if num_inference_steps is not None and hasattr(_a, "set_timesteps" ): scheduler.set_timesteps(_a ) elif num_inference_steps is not None and not hasattr(_a, "set_timesteps" ): __SCREAMING_SNAKE_CASE = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] __SCREAMING_SNAKE_CASE = scheduler.timesteps[5] __SCREAMING_SNAKE_CASE = scheduler.timesteps[6] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def __lowerCAmelCase ( self ) -> str: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_a, time_step=_a ) def __lowerCAmelCase ( self ) -> Optional[Any]: for t, num_inference_steps in zip([1, 5, 10], [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_a, time_step=_a ) def __lowerCAmelCase ( self ) -> Any: __SCREAMING_SNAKE_CASE = self.full_loop() __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 2_54_05_29 ) < 10
693
0
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = '''โ–''' __snake_case = {'''vocab_file''': '''prophetnet.tokenizer'''} __snake_case = { '''vocab_file''': { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer''' ), } } __snake_case = { '''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False}, } __snake_case = { '''microsoft/xprophetnet-large-wiki100-cased''': 5_1_2, } def _A ( _lowercase ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = collections.OrderedDict() with open(_lowercase , 'r' , encoding='utf-8' ) as reader: __UpperCamelCase = reader.readlines() for index, token in enumerate(_lowercase ): __UpperCamelCase = token.rstrip('\n' ) __UpperCamelCase = index return vocab class __lowerCamelCase (_a ): _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ["""input_ids""", """attention_mask"""] def __init__( self: str,A_: int,A_: str="[SEP]",A_: List[Any]="[SEP]",A_: str="[SEP]",A_: Any="[UNK]",A_: Optional[int]="[PAD]",A_: List[str]="[CLS]",A_: Dict="[MASK]",A_: Optional[Dict[str, Any]] = None,**A_: str,): '''simple docstring''' __UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_,eos_token=A_,sep_token=A_,unk_token=A_,pad_token=A_,cls_token=A_,mask_token=A_,sp_model_kwargs=self.sp_model_kwargs,**A_,) try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) __UpperCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | 'โ–' | 's' | 'โ–de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| 'โ–' | 's' | 'โ–de' | '-' | 'โ–a' # put special tokens and [unused] tokens into the vocab __UpperCamelCase = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4} for i in range(10 ): __UpperCamelCase = F'''[unused{i}]''' __UpperCamelCase = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab __UpperCamelCase = 12 __UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(A_ ) def __getstate__( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = self.__dict__.copy() __UpperCamelCase = None return state def __setstate__( self: List[Any],A_: List[Any] ): '''simple docstring''' __UpperCamelCase = d try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise # for backward compatibility if not hasattr(self,'sp_model_kwargs' ): __UpperCamelCase = {} __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case_ ( self: Any,A_: List[int],A_: Optional[List[int]] = None,A_: bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_,token_ids_a=A_,already_has_special_tokens=A_ ) if token_ids_a is None: return ([0] * len(A_ )) + [1] return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1] def snake_case_ ( self: Optional[int],A_: List[int],A_: Optional[List[int]] = None ): '''simple docstring''' __UpperCamelCase = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self: List[Any],A_: str ): '''simple docstring''' return self.sp_model.encode(A_,out_type=A_ ) def snake_case_ ( self: Any,A_: Optional[int] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCamelCase = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def snake_case_ ( self: str,A_: int ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case_ ( self: Tuple,A_: int ): '''simple docstring''' __UpperCamelCase = ''.join(A_ ).replace(A_,' ' ).strip() return out_string def snake_case_ ( self: Optional[int],A_: str,A_: Optional[str] = None ): '''simple docstring''' if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCamelCase = os.path.join( A_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_,'wb' ) as fi: __UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(A_ ) return 
(out_vocab_file,) def snake_case_ ( self: Tuple,A_: List[int],A_: Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.sep_token_id] __UpperCamelCase = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
1
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000) -> bool:
    """simple docstring"""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d exact
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
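# bin_exp_mod(a, d, n) is assumed to behave like pow(a, d, n); with 1000
# random witnesses per call, misclassification is vanishingly unlikely:
assert is_prime_big(2) and is_prime_big(97) and is_prime_big(7919)
assert not is_prime_big(1)
assert not is_prime_big(561)  # a Carmichael number; a bare Fermat test would miss it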
693
0
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str=10 ) -> Optional[Any]: _A = [] for _ in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict=10 ) -> Optional[int]: _A = [] for step in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _A = os.path.join(_snake_case , '''schedule.bin''' ) torch.save(scheduler.state_dict() , _snake_case ) _A = torch.load(_snake_case ) scheduler.load_state_dict(_snake_case ) return lrs @require_torch class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ) -> Any: self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase ) def snake_case_ ( self : int ) -> Union[str, Any]: _A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCAmelCase ) _A = torch.tensor([0.4, 0.2, -0.5] ) _A = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _A = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_00 ): _A = criterion(__lowerCAmelCase , __lowerCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def snake_case_ ( self : int ) -> Union[str, Any]: _A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCAmelCase ) _A = torch.tensor([0.4, 0.2, -0.5] ) _A = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _A = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__lowerCAmelCase , weight_decay=0.0 , relative_step=__lowerCAmelCase , scale_parameter=__lowerCAmelCase , warmup_init=__lowerCAmelCase , ) for _ in range(10_00 ): _A = criterion(__lowerCAmelCase , __lowerCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" a__ : Optional[Any] = nn.Linear(50 , 50) if is_torch_available() else None a__ : Dict = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None a__ : List[Any] = 10 def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any]=None ) -> List[Any]: self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase , msg=__lowerCAmelCase ) def snake_case_ ( self : Any ) -> str: _A = {'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _A = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _A , _A = data _A = scheduler_func(self.optimizer , **__lowerCAmelCase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _A = unwrap_schedule(__lowerCAmelCase , self.num_steps ) self.assertListAlmostEqual( __lowerCAmelCase , __lowerCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _A = scheduler_func(self.optimizer , **__lowerCAmelCase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(__lowerCAmelCase ) # wrap to test picklability of the schedule _A = unwrap_and_save_reload_schedule(__lowerCAmelCase , self.num_steps ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[Any] , __lowerCAmelCase : Any ) -> List[Any]: _A = fn def __call__( self : Union[str, Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : List[str] ) -> Dict: return self.fn(*__lowerCAmelCase , **__lowerCAmelCase ) @classmethod def snake_case_ ( self : Any , __lowerCAmelCase : Optional[Any] ) -> List[str]: _A = list(map(self , scheduler.lr_lambdas ) )
2
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int
) -> np.ndarray:
    """simple docstring"""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
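# The kernel is a Gaussian envelope times a cosine carrier:
#   g(x', y') = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi)
# Shape checks (the parameter values here are illustrative only):
assert gabor_filter_kernel(9, 8, 45, 10, 0, 1).shape == (9, 9)
assert gabor_filter_kernel(10, 8, 45, 10, 0, 1).shape == (11, 11)  # even sizes are bumped to odd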
693
0
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : Tuple = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowerCAmelCase : Dict = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } lowerCAmelCase : Optional[int] = {'facebook/blenderbot_small-90M': 5_12} def A_( A : int): UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char)) UpperCamelCase = char UpperCamelCase = set(A) return pairs class SCREAMING_SNAKE_CASE__ ( snake_case_): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ["""input_ids""", """attention_mask"""] def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , )-> Optional[int]: '''simple docstring''' super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ ) with open(A_ , encoding='utf-8' ) as vocab_handle: UpperCamelCase = json.load(A_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: UpperCamelCase = merges_handle.read().split('\n' )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in merges] UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) ) UpperCamelCase = {} @property def UpperCAmelCase_ ( self )-> int: '''simple docstring''' return len(self.encoder ) def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase_ ( self , A_ )-> str: '''simple docstring''' if token in self.cache: return self.cache[token] UpperCamelCase = re.sub('([.,!?()])' , R' \1' , A_ ) UpperCamelCase = re.sub('(\')' , R' \1 ' , A_ ) UpperCamelCase = re.sub(R'\s{2,}' , ' ' , A_ ) if "\n" in token: UpperCamelCase = token.replace('\n' , ' __newln__' ) UpperCamelCase = token.split(' ' ) UpperCamelCase = [] for token in tokens: if not len(A_ ): continue UpperCamelCase = token.lower() UpperCamelCase = tuple(A_ ) UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) UpperCamelCase = get_pairs(A_ ) if not pairs: words.append(A_ ) continue while True: UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(A_ ): try: UpperCamelCase = word.index(A_ , A_ ) new_word.extend(word[i:j] ) UpperCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(A_ ) UpperCamelCase = new_word if len(A_ ) == 1: break else: UpperCamelCase = get_pairs(A_ ) UpperCamelCase = '@@ '.join(A_ ) UpperCamelCase = word[:-4] UpperCamelCase = word words.append(A_ ) return " 
".join(A_ ) def UpperCAmelCase_ ( self , A_ )-> List[str]: '''simple docstring''' UpperCamelCase = [] UpperCamelCase = re.findall(R'\S+\n?' , A_ ) for token in words: split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) ) return split_tokens def UpperCAmelCase_ ( self , A_ )-> int: '''simple docstring''' UpperCamelCase = token.lower() return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase_ ( self , A_ )-> str: '''simple docstring''' return self.decoder.get(A_ , self.unk_token ) def UpperCAmelCase_ ( self , A_ )-> str: '''simple docstring''' UpperCamelCase = ' '.join(A_ ).replace('@@ ' , '' ).strip() return out_string def UpperCAmelCase_ ( self , A_ , A_ = None )-> Tuple[str]: '''simple docstring''' if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' ) UpperCamelCase = 0 with open(A_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) UpperCamelCase = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file
3
def sylvester(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence: 2, 3, 7, 43, ..."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
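A quick sanity check of the recurrence above (each term is a_n^2 - a_n + 1 of its predecessor), assuming the cleaned-up function is importable as sylvester:

# First terms of Sylvester's sequence: 2, 3, 7, 43, 1807, ...
assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]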
693
0
"""simple docstring""" import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) __UpperCamelCase : int = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] ): inspect_dataset(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = path + '.py' assert script_name in os.listdir(_UpperCAmelCase ) assert "__pycache__" not in os.listdir(_UpperCAmelCase ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : int ): inspect_metric(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = path + '.py' assert script_name in os.listdir(_UpperCAmelCase ) assert "__pycache__" not in os.listdir(_UpperCAmelCase ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] ): lowerCAmelCase = get_dataset_config_info(_UpperCAmelCase , config_name=_UpperCAmelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] ): with pytest.raises(_UpperCAmelCase ): get_dataset_config_info(_UpperCAmelCase , config_name=_UpperCAmelCase ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] ): lowerCAmelCase = get_dataset_config_names(_UpperCAmelCase ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ): lowerCAmelCase = get_dataset_infos(_UpperCAmelCase ) assert list(infos.keys() ) == expected_configs lowerCAmelCase = expected_configs[0] assert expected_config in infos lowerCAmelCase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ): lowerCAmelCase = get_dataset_infos(_UpperCAmelCase ) assert expected_config in infos lowerCAmelCase = 
infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ): with pytest.raises(_UpperCAmelCase ): get_dataset_split_names(_UpperCAmelCase , config_name=_UpperCAmelCase )
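The helpers exercised by these tests can also be called directly; a minimal sketch, assuming network access to the Hugging Face Hub:

from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")  # e.g. ["plain_text"]
splits = get_dataset_split_names("squad", config_name="plain_text")  # e.g. ["train", "validation"]
print(configs, splits)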
4
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]: pass @is_pipeline_test @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @require_torch def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(_a ), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], ], ) @require_tf def __lowerCAmelCase ( self ) -> Any: __SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], ], ) @slow @require_torch def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = 
image_classifier(_a, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(_a ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(_a ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, )
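Outside the test harness the same pipeline takes a few lines; a sketch assuming transformers, PIL, and the fixture image used above:

from PIL import Image
from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# Each candidate label is scored by CLIP and the scores are normalized across candidates
print(classifier(image, candidate_labels=["cat", "plane", "remote"]))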
693
0
def is_automorphic_number(number: int) -> bool:
    """Return True if ``number``'s square ends in the digits of ``number`` itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # compare the trailing digits of the number and its square, one at a time
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
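A few known automorphic numbers give a compact check (25^2 = 625 and 76^2 = 5776 both end in the original number):

assert all(is_automorphic_number(n) for n in (0, 1, 5, 6, 25, 76, 376))
assert not is_automorphic_number(7)  # 49 does not end in 7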
5
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move in a complete binary game tree
    whose leaves hold ``scores``."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
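Worked by hand for a tiny tree: with leaves [3, 5, 2, 9] the maximizer gets max(min(3, 5), min(2, 9)) = 3, which the function reproduces:

import math

scores = [3, 5, 2, 9]
height = math.log(len(scores), 2)  # depth of the complete binary tree, here 2.0
assert minimax(0, 0, True, scores, height) == 3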
693
0
from ..utils import DummyObject, requires_backends class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Dict , *__A :List[str] , **__A :Dict ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Dict , *__A :Dict , **__A :Union[str, Any] ) -> Dict: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Tuple , *__A :Optional[Any] , **__A :str ) -> Any: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Dict , *__A :List[str] , **__A :str ) -> List[str]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Any , *__A :Tuple , **__A :Optional[int] ) -> List[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :int , *__A :Optional[Any] , **__A :Any ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :List[Any] , *__A :Any , **__A :List[Any] ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Tuple , *__A :Optional[Any] , **__A :List[Any] ) -> Any: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Any , *__A :List[str] , **__A :int ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :int , *__A :Tuple , **__A :str ) -> str: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :List[str] , *__A :List[str] , **__A :Optional[Any] ) -> Dict: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :List[Any] , *__A :Any , **__A :List[Any] ) -> Dict: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Any , *__A :List[Any] , **__A :int ) -> str: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Union[str, Any] , *__A :Dict , **__A :str ) -> List[str]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :List[str] , *__A :Tuple , **__A :List[str] ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Optional[Any] , *__A :Optional[int] , **__A :Union[str, Any] ) -> int: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Dict , *__A :Any , **__A :Union[str, Any] ) -> Any: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Optional[Any] , *__A :List[Any] , **__A :Dict ) -> List[str]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Any , *__A :int , **__A :str ) -> List[str]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :str , *__A :int , **__A :Any ) -> Tuple: """simple docstring""" 
requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :int , *__A :List[Any] , **__A :Dict ) -> List[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :int , *__A :Optional[Any] , **__A :str ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :int , *__A :str , **__A :int ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Any , *__A :Union[str, Any] , **__A :int ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Optional[Any] , *__A :Optional[Any] , **__A :Optional[Any] ) -> str: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Tuple , *__A :List[str] , **__A :int ) -> List[str]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Dict , *__A :List[Any] , **__A :Any ) -> Tuple: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Dict , *__A :Optional[Any] , **__A :List[str] ) -> Dict: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Any , *__A :List[Any] , **__A :Dict ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :List[Any] , *__A :Optional[int] , **__A :List[str] ) -> Tuple: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Any , *__A :Optional[Any] , **__A :List[Any] ) -> Tuple: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Dict , *__A :Optional[Any] , **__A :Union[str, Any] ) -> str: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Union[str, Any] , *__A :str , **__A :str ) -> int: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :int , *__A :List[str] , **__A :Optional[Any] ) -> Optional[Any]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :List[Any] , *__A :Optional[Any] , **__A :Tuple ) -> int: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :List[str] , *__A :Optional[Any] , **__A :Any ) -> Tuple: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Tuple , *__A :Dict , **__A :List[Any] ) -> str: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Optional[int] , *__A :Any , **__A :int ) -> List[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :int , *__A :Tuple , **__A :List[str] ) -> List[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] )
6
def base16_encode(data: bytes) -> str:
    """Encode ``data`` as an uppercase hexadecimal (base16) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
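A roundtrip check, cross-validated against the standard library:

import base64

payload = b"Hello World!"
encoded = base16_encode(payload)
assert encoded == base64.b16encode(payload).decode("ascii")  # "48656C6C6F20576F726C6421"
assert base16_decode(encoded) == payload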
693
0
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys a = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') a = ( subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('''utf-8''').split() ) a = '''|'''.join(sys.argv[1:]) a = re.compile(rF'''^({joined_dirs}).*?\.py$''') a = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
7
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of ``n`` by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that every element of ``iterable`` is equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of ``n`` consecutive integers with ``n`` distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first of ``n`` consecutive integers with ``n`` distinct prime factors each (Project Euler 47)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
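Small cases are easy to verify by hand: 14 = 2*7 and 15 = 3*5 form the first pair of consecutive integers with two distinct prime factors each, and 644, 645, 646 is the first such triple:

assert run(2) == [14, 15]
assert run(3) == [644, 645, 646]
assert solution(2) == 14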
693
0
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=16 , _UpperCAmelCase=[1, 2, 1] , _UpperCAmelCase=[2, 2, 4] , _UpperCAmelCase=2 , _UpperCAmelCase=2.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=8 , _UpperCAmelCase=["stage1", "stage2", "stage3"] , _UpperCAmelCase=[1, 2, 3] , ): '''simple docstring''' __A : Union[str, Any] = parent __A : Dict = batch_size __A : Any = image_size __A : List[str] = patch_size __A : List[str] = num_channels __A : Any = embed_dim __A : Dict = depths __A : List[Any] = num_heads __A : str = window_size __A : Union[str, Any] = mlp_ratio __A : str = qkv_bias __A : Dict = hidden_dropout_prob __A : Tuple = attention_probs_dropout_prob __A : int = drop_path_rate __A : str = hidden_act __A : str = use_absolute_embeddings __A : str = patch_norm __A : Dict = layer_norm_eps __A : List[str] = initializer_range __A : str = is_training __A : Union[str, Any] = scope __A : int = use_labels __A : Any = type_sequence_label_size __A : List[str] = encoder_stride __A : str = out_features __A : int = out_indices def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __A : Optional[int] = None if self.use_labels: __A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __A : Union[str, Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Any = MaskFormerSwinModel(config=_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : Any = model(_UpperCAmelCase) __A : Optional[int] = 
((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) __A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Any = MaskFormerSwinBackbone(config=_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : List[Any] = model(_UpperCAmelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [13, 16, 16, 16]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , [16, 32, 64]) # verify ValueError with self.parent.assertRaises(_UpperCAmelCase): __A : Optional[int] = ['stem'] __A : Dict = MaskFormerSwinBackbone(config=_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Dict = self.prepare_config_and_inputs() __A ,__A ,__A : int = config_and_inputs __A : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ): lowerCAmelCase = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCAmelCase = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {} lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = MaskFormerSwinModelTester(self) __A : Union[str, Any] = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' )) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase) @unittest.skip('Swin does not use inputs_embeds') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass @unittest.skip('Swin does not support feedforward chunking') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : str = model_class(_UpperCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) __A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear)) def 
SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : str = model_class(_UpperCAmelCase) __A : Union[str, Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __A : str = [*signature.parameters.keys()] __A : Tuple = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : int = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Optional[int] = outputs.hidden_states __A : Union[str, Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1) self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase) # Swin has a different seq_length __A : int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) __A : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Any = self.model_tester.prepare_config_and_inputs_for_common() __A : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __A : List[Any] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __A : List[str] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Any = self.model_tester.prepare_config_and_inputs_for_common() __A : Optional[Any] = 3 __A : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) __A : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) __A : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __A : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __A : Optional[int] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __A : Optional[int] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width)) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained 
checkpoints') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_UpperCAmelCase): __A : Any = 0 return t def check_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase={}): with torch.no_grad(): __A : Dict = model(**_UpperCAmelCase , return_dict=_UpperCAmelCase , **_UpperCAmelCase) __A : Union[str, Any] = model(**_UpperCAmelCase , return_dict=_UpperCAmelCase , **_UpperCAmelCase).to_tuple() def recursive_check(_UpperCAmelCase , _UpperCAmelCase): if isinstance(_UpperCAmelCase , (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase , _UpperCAmelCase): recursive_check(_UpperCAmelCase , _UpperCAmelCase) elif isinstance(_UpperCAmelCase , _UpperCAmelCase): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values()): recursive_check(_UpperCAmelCase , _UpperCAmelCase) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_UpperCAmelCase) , set_nan_tensor_to_zero(_UpperCAmelCase) , atol=1e-5) , msg=( 'Tuple and dict output are not equal. Difference:' F' {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:' F' {torch.isnan(_UpperCAmelCase).any()} and `inf`: {torch.isinf(_UpperCAmelCase)}. Dict has' F' `nan`: {torch.isnan(_UpperCAmelCase).any()} and `inf`: {torch.isinf(_UpperCAmelCase)}.' 
) , ) recursive_check(_UpperCAmelCase , _UpperCAmelCase) for model_class in self.all_model_classes: __A : Tuple = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : Tuple = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase) __A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase) check_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) __A : int = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase) __A : Dict = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase) check_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) __A : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase) __A : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase) check_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , {'output_hidden_states': True}) __A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase) __A : str = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase) check_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , {'output_hidden_states': True}) @require_torch class SCREAMING_SNAKE_CASE (unittest.TestCase , a__ ): lowerCAmelCase = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCAmelCase = MaskFormerSwinConfig def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[int] = MaskFormerSwinModelTester(self) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common() __A : Tuple = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: __A : Optional[int] = backbone_class(_UpperCAmelCase) backbone.to(_UpperCAmelCase) backbone.eval() __A : Tuple = backbone(**_UpperCAmelCase) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _UpperCAmelCase) self.assertTrue(len(outputs.feature_maps) == len(backbone.channels)) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels)) self.assertIsNone(outputs.hidden_states) self.assertIsNone(outputs.attentions) # Test output_hidden_states=True __A : Dict = backbone(**_UpperCAmelCase , output_hidden_states=_UpperCAmelCase) self.assertIsNotNone(outputs.hidden_states) self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names)) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __A ,__A ,__A : List[Any] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels)) # Test output_attentions=True if self.has_attentions: __A : Union[str, Any] = backbone(**_UpperCAmelCase , output_attentions=_UpperCAmelCase) self.assertIsNotNone(outputs.attentions)
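A shape-only forward pass of the backbone under test, using a randomly initialized config rather than pretrained weights (a sketch, not a tested snippet):

import torch
from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
backbone = MaskFormerSwinBackbone(config)
backbone.eval()
with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 224, 224))
for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # one (batch, channels, height, width) map per requested stage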
8
import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _A ( __snake_case :Dict ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = VideoMAEConfig() set_architecture_configs(__snake_case , __snake_case ) if "finetuned" not in model_name: __SCREAMING_SNAKE_CASE = False if "finetuned" in model_name: __SCREAMING_SNAKE_CASE = "huggingface/label-files" if "kinetics" in model_name: __SCREAMING_SNAKE_CASE = 400 __SCREAMING_SNAKE_CASE = "kinetics400-id2label.json" elif "ssv2" in model_name: __SCREAMING_SNAKE_CASE = 174 __SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." ) __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) ) __SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]: """simple docstring""" if "small" in model_name: __SCREAMING_SNAKE_CASE = 384 __SCREAMING_SNAKE_CASE = 1536 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = 192 __SCREAMING_SNAKE_CASE = 768 elif "large" in model_name: __SCREAMING_SNAKE_CASE = 1024 __SCREAMING_SNAKE_CASE = 4096 __SCREAMING_SNAKE_CASE = 24 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 8 __SCREAMING_SNAKE_CASE = 512 __SCREAMING_SNAKE_CASE = 2048 elif "huge" in model_name: __SCREAMING_SNAKE_CASE = 1280 __SCREAMING_SNAKE_CASE = 5120 __SCREAMING_SNAKE_CASE = 32 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 8 __SCREAMING_SNAKE_CASE = 640 __SCREAMING_SNAKE_CASE = 2560 elif "base" not in model_name: raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" ) def _A ( __snake_case :List[Any] ) -> Optional[int]: """simple docstring""" if "encoder." in name: __SCREAMING_SNAKE_CASE = name.replace("encoder." 
, "" ) if "cls_token" in name: __SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" ) if "decoder_pos_embed" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" ) if "pos_embed" in name and "decoder" not in name: __SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" ) if "patch_embed.proj" in name: __SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" ) if "decoder.blocks" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" ) if "blocks" in name: __SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" ) if "attn.proj" in name: __SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name and "bias" not in name: __SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" ) if "attn" in name: __SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" ) if "norm1" in name: __SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" ) if "decoder_embed" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" ) if "decoder_norm" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" ) if "decoder_pred" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: __SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: __SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" ) if "head" in name and "decoder" not in name: __SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" ) return name def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case ) if key.startswith("encoder." ): __SCREAMING_SNAKE_CASE = key.replace("encoder." , "" ) if "qkv" in key: __SCREAMING_SNAKE_CASE = key.split("." ) if key.startswith("decoder.blocks" ): __SCREAMING_SNAKE_CASE = config.decoder_hidden_size __SCREAMING_SNAKE_CASE = int(key_split[2] ) __SCREAMING_SNAKE_CASE = "decoder.decoder_layers." if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = config.hidden_size __SCREAMING_SNAKE_CASE = int(key_split[1] ) __SCREAMING_SNAKE_CASE = "videomae.encoder.layer." 
if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = val return orig_state_dict def _A ( ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" ) __SCREAMING_SNAKE_CASE = np.load(__snake_case ) return list(__snake_case ) def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case ) if "finetuned" in model_name: __SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case ) else: __SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case ) # download original checkpoint, hosted on Google Drive __SCREAMING_SNAKE_CASE = "pytorch_model.bin" gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case ) __SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" ) if "model" in files: __SCREAMING_SNAKE_CASE = files["model"] else: __SCREAMING_SNAKE_CASE = files["module"] __SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case ) model.load_state_dict(__snake_case ) model.eval() # verify model on basic input __SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) __SCREAMING_SNAKE_CASE = prepare_video() __SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" ) if "finetuned" not in model_name: __SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" ) __SCREAMING_SNAKE_CASE = torch.load(__snake_case ) __SCREAMING_SNAKE_CASE = model(**__snake_case ) __SCREAMING_SNAKE_CASE = outputs.logits __SCREAMING_SNAKE_CASE = [ "videomae-small-finetuned-kinetics", "videomae-small-finetuned-ssv2", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) "videomae-base-short", "videomae-base-short-finetuned-kinetics", "videomae-base", "videomae-base-finetuned-kinetics", "videomae-large", "videomae-large-finetuned-kinetics", "videomae-huge-finetuned-kinetics", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) "videomae-base-short-ssv2", "videomae-base-short-finetuned-ssv2", "videomae-base-ssv2", "videomae-base-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] ) elif model_name == "videomae-small-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 174] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] ) elif model_name == "videomae-base": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] ) elif model_name == "videomae-base-short": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] ) # we verified the loss both for normalized and unnormalized targets for this one __SCREAMING_SNAKE_CASE = 
torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] ) elif model_name == "videomae-large": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] ) elif model_name == "videomae-large-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] ) elif model_name == "videomae-huge-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] ) elif model_name == "videomae-base-short-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] ) elif model_name == "videomae-base-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ) elif model_name == "videomae-base-short-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] ) elif model_name == "videomae-base-short-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 174] ) __SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] ) elif model_name == "videomae-base-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] ) elif model_name == "videomae-base-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 174] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] ) else: raise ValueError(f'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 ) else: print("Logits:" , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 ) print("Logits ok!" ) # verify loss, if applicable if model_name == "videomae-base-short": __SCREAMING_SNAKE_CASE = outputs.loss assert torch.allclose(__snake_case , __snake_case , atol=1e-4 ) print("Loss ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__snake_case ) model.save_pretrained(__snake_case ) if push_to_hub: print("Pushing to the hub..." ) model.push_to_hub(__snake_case , organization="nielsr" ) if __name__ == "__main__": _snake_case : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4', type=str, help=( 'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct' ' download link.' 
), ) parser.add_argument( '--pytorch_dump_folder_path', default='/Users/nielsrogge/Documents/VideoMAE/Test', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _snake_case : Optional[int] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
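After conversion (or when pulling an already-converted checkpoint from the Hub), the model is used like any video classifier; a sketch with random stand-in frames, assuming the MCG-NJU/videomae-base-finetuned-kinetics checkpoint:

import numpy as np
import torch
from transformers import VideoMAEForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")

video = list(np.random.randint(0, 256, (16, 360, 640, 3), dtype=np.uint8))  # 16 stand-in frames
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])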
693
0
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) class __lowerCAmelCase : """simple docstring""" def __init__( self : Tuple ): """simple docstring""" A__ = False def _a ( self : List[Any] , _snake_case : Tuple , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : List[Any] ): """simple docstring""" if not self.initialized: A__ = RagRetriever( _snake_case , question_encoder_tokenizer=_snake_case , generator_tokenizer=_snake_case , index=_snake_case , init_retrieval=_snake_case , ) A__ = True def _a ( self : Union[str, Any] ): """simple docstring""" self.retriever.index.init_index() def _a ( self : List[str] , _snake_case : Union[str, Any] , _snake_case : List[Any] ): """simple docstring""" A__ , A__ = self.retriever._main_retrieve(_snake_case , _snake_case ) return doc_ids, retrieved_doc_embeds class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : int , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Tuple , _snake_case : str=None ): """simple docstring""" if index is not None and index.is_initialized() and len(_snake_case ) > 0: raise ValueError( 'When using Ray for distributed fine-tuning, ' 'you\'ll need to provide the paths instead, ' 'as the dataset and the index are loaded ' 'separately. More info in examples/rag/use_own_knowledge_dataset.py ' ) super().__init__( _snake_case , question_encoder_tokenizer=_snake_case , generator_tokenizer=_snake_case , index=_snake_case , init_retrieval=_snake_case , ) A__ = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(_snake_case , _snake_case , _snake_case , _snake_case ) for worker in self.retrieval_workers ] ) def _a ( self : Dict ): """simple docstring""" logger.info('initializing retrieval' ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def _a ( self : str , _snake_case : Optional[Any] , _snake_case : Optional[int] ): """simple docstring""" if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. 
A__ = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] A__ , A__ = ray.get(random_worker.retrieve.remote(_snake_case , _snake_case ) ) else: A__ , A__ = self._main_retrieve(_snake_case , _snake_case ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_snake_case ) @classmethod def _a ( cls : List[Any] , _snake_case : int , _snake_case : Union[str, Any]=None , **_snake_case : int ): """simple docstring""" return super(_snake_case , cls ).get_tokenizers(_snake_case , _snake_case , **_snake_case ) @classmethod def _a ( cls : int , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : List[str]=None , **_snake_case : Any ): """simple docstring""" A__ = kwargs.pop('config' , _snake_case ) or RagConfig.from_pretrained(_snake_case , **_snake_case ) A__ = RagTokenizer.from_pretrained(_snake_case , config=_snake_case ) A__ = rag_tokenizer.question_encoder A__ = rag_tokenizer.generator if indexed_dataset is not None: A__ = 'custom' A__ = CustomHFIndex(config.retrieval_vector_size , _snake_case ) else: A__ = cls._build_index(_snake_case ) return cls( _snake_case , question_encoder_tokenizer=_snake_case , generator_tokenizer=_snake_case , retrieval_workers=_snake_case , index=_snake_case , )
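In the original RAG fine-tuning example the worker class above (its name is mangled here; RayRetriever in the upstream example) is registered with Ray and its actor handles passed to the retriever. A rough sketch under those assumptions, requiring ray and Hub access:

import ray

ray.init()
# Several actors let retrieval run in parallel across fine-tuning workers.
workers = [ray.remote(RayRetriever).remote() for _ in range(2)]  # RayRetriever: assumed name
retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
retriever.init_retrieval()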
9
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
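Since the shim above only forwards to CLIPImageProcessor, new code should use the replacement directly; a sketch:

from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])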
693
0
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
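The string result agrees with Python's built-in bitwise OR, which makes a cheap property check:

assert binary_or(25, 32) == "0b111001"  # 25 | 32 == 57
assert binary_or(25, 32) == bin(25 | 32)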
10
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n``."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers under ``n`` (Project Euler 21)."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
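The classic amicable pair 220/284 exercises the divisor helper, and the total below 10000 is the well-known Project Euler 21 answer:

assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220
assert solution(10000) == 31626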
693
0
from __future__ import annotations


def all_unique(items: list) -> bool:
    """Return True if every element in ``items`` is distinct."""
    return len(set(items)) == len(items)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
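A quick check of the predicate:

assert all_unique([1, 2, 3]) and not all_unique([1, 2, 2])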
11
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V (R = 0.0821 L*atm/(mol*K))."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / nR."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
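Spot checks against the ideal-gas relation PV = nRT with R = 0.0821 L*atm/(mol*K):

assert molarity_to_normality(2, 3.1, 0.31) == 20  # (3.1 mol / 0.31 L) * n-factor 2
assert moles_to_pressure(0.82, 3, 300) == 90      # P = nRT / V
assert moles_to_volume(0.82, 3, 300) == 90        # V = nRT / P
assert pressure_and_volume_to_temperature(0.82, 1, 2) == 20  # T = PV / nR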
693
0
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' assert isinstance(lowercase_ , lowercase_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' lowercase__ : int = tmp_path / """cache""" lowercase__ : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ : int = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read() _check_parquet_dataset(lowercase_ , lowercase_ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> str: '''simple docstring''' lowercase__ : Union[str, Any] = tmp_path / """cache""" lowercase__ : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowercase__ : int = features.copy() if features else default_expected_features lowercase__ : str = ( Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ : str = ParquetDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read() _check_parquet_dataset(lowercase_ , lowercase_ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]: '''simple docstring''' lowercase__ : Tuple = tmp_path / """cache""" lowercase__ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowercase__ : List[Any] = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read() _check_parquet_dataset(lowercase_ , lowercase_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Dict: '''simple docstring''' if issubclass(lowercase_ , lowercase_ ): lowercase__ : Any = parquet_path elif issubclass(lowercase_ , lowercase_ ): lowercase__ : str = [parquet_path] lowercase__ : Any = tmp_path / """cache""" lowercase__ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowercase__ : List[str] = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ ).read() _check_parquet_dataset(lowercase_ , lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=("train",) ) -> int: '''simple docstring''' 
assert isinstance(lowercase_ , lowercase_ ) for split in splits: lowercase__ : List[Any] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' lowercase__ : Union[str, Any] = tmp_path / """cache""" lowercase__ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read() _check_parquet_datasetdict(lowercase_ , lowercase_ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]: '''simple docstring''' lowercase__ : Union[str, Any] = tmp_path / """cache""" lowercase__ : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowercase__ : List[str] = features.copy() if features else default_expected_features lowercase__ : Tuple = ( Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ : Union[str, Any] = ParquetDatasetReader({"""train""": parquet_path} , features=lowercase_ , cache_dir=lowercase_ ).read() _check_parquet_datasetdict(lowercase_ , lowercase_ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: '''simple docstring''' if split: lowercase__ : Union[str, Any] = {split: parquet_path} else: lowercase__ : Optional[int] = """train""" lowercase__ : List[Any] = {"""train""": parquet_path, """test""": parquet_path} lowercase__ : Dict = tmp_path / """cache""" lowercase__ : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowercase__ : int = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ ).read() _check_parquet_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> str: '''simple docstring''' lowercase__ : Dict = ParquetDatasetWriter(lowercase_ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 lowercase__ : Any = pq.ParquetFile(tmp_path / """foo.parquet""" ) lowercase__ : Optional[Any] = pf.read() assert dataset.data.table == output_table def UpperCamelCase ( lowercase_ , lowercase_ ) -> Dict: '''simple docstring''' lowercase__ : List[str] = str(shared_datadir / """test_image_rgb.jpg""" ) lowercase__ : Any = {"""image""": [image_path]} lowercase__ : str = Features({"""image""": Image()} ) lowercase__ : Dict = Dataset.from_dict(lowercase_ , features=lowercase_ ) lowercase__ : Any = ParquetDatasetWriter(lowercase_ , tmp_path / """foo.parquet""" 
) assert writer.write() > 0 lowercase__ : Tuple = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features lowercase__ : Tuple = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=lowercase_ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> str: '''simple docstring''' assert get_writer_batch_size(lowercase_ ) == expected
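For orientation, a minimal round trip through the reader and writer exercised by these tests; the file and cache paths are illustrative placeholders:

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ParquetDatasetWriter(ds, "example.parquet").write()  # returns the number of bytes written
reloaded = ParquetDatasetReader("example.parquet", cache_dir="cache").read()
assert reloaded.column_names == ds.column_names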
12
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __SCREAMING_SNAKE_CASE : def __init__( self, _a, _a=99, _a=13, _a=7, _a=9, _a=True, _a=True, _a=False, _a=32, _a=5, _a=4, _a=37, _a=8, _a=0.1, _a=0.002, _a=1, _a=0, _a=0, _a=None, _a=None, ) -> Optional[int]: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = encoder_seq_length __SCREAMING_SNAKE_CASE = decoder_seq_length # For common tests __SCREAMING_SNAKE_CASE = self.decoder_seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_attention_mask __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = d_ff __SCREAMING_SNAKE_CASE = relative_attention_num_buckets __SCREAMING_SNAKE_CASE = dropout_rate __SCREAMING_SNAKE_CASE = initializer_factor __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = decoder_start_token_id __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = decoder_layers def __lowerCAmelCase ( self ) -> Optional[int]: return TaConfig.from_pretrained("google/umt5-base" ) def __lowerCAmelCase ( self, _a, _a, _a, _a=None, _a=None, _a=None, _a=None, _a=None, ) -> int: if attention_mask is None: __SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=_a ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=_a ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones( config.num_decoder_layers, config.num_attention_heads, device=_a ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 ) __SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 ) __SCREAMING_SNAKE_CASE = 
self.get_config() __SCREAMING_SNAKE_CASE = config.num_attention_heads __SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(_a, _a, _a ) return config, input_dict def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() return config, inputs_dict def __lowerCAmelCase ( self ) -> Optional[int]: return TaConfig( vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def __lowerCAmelCase ( self ) -> Union[str, Any]: return TaConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ) model.to(_a ) model.eval() __SCREAMING_SNAKE_CASE = model( input_ids=_a, decoder_input_ids=_a, attention_mask=_a, decoder_attention_mask=_a, ) __SCREAMING_SNAKE_CASE = model(input_ids=_a, decoder_input_ids=_a ) __SCREAMING_SNAKE_CASE = result.last_hidden_state __SCREAMING_SNAKE_CASE = result.past_key_values __SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(_a ), config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ), 4 ) def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Tuple: __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).get_decoder().to(_a ).eval() # first forward pass __SCREAMING_SNAKE_CASE = model(_a, use_cache=_a ) __SCREAMING_SNAKE_CASE = model(_a ) __SCREAMING_SNAKE_CASE = model(_a, use_cache=_a ) self.parent.assertTrue(len(_a ) == len(_a ) ) self.parent.assertTrue(len(_a ) == len(_a ) + 1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1), config.vocab_size ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 ) __SCREAMING_SNAKE_CASE = model(_a )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(_a, past_key_values=_a )["last_hidden_state"] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = 
output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) ) def __lowerCAmelCase ( self, _a, _a, ) -> Optional[int]: __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).to(_a ).half().eval() __SCREAMING_SNAKE_CASE = model(**_a )["last_hidden_state"] self.parent.assertFalse(torch.isnan(_a ).any().item() ) @require_torch class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE__ =( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ =(UMTaForConditionalGeneration,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ =( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =True # The small UMT5 model needs higher percentages for CPU/MP tests SCREAMING_SNAKE_CASE__ =[0.8, 0.9] def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = UMTaModelTester(self ) @unittest.skip("Test has a segmentation fault on torch 1.8.0" ) def __lowerCAmelCase ( self ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( _a, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=_a, opset_version=9, input_names=["input_ids", "decoder_input_ids"], ) @unittest.skipIf(torch_device == "cpu", "Cant do half precision" ) def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*_a ) def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = ["encoder_attentions", "decoder_attentions", "cross_attentions"] __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = config_and_inputs[0] __SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(_a ).eval() model.to(_a ) __SCREAMING_SNAKE_CASE = { "head_mask": torch.zeros(config.num_layers, config.num_heads, device=_a ), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ), } for attn_name, (name, mask) in zip(_a, head_masking.items() ): __SCREAMING_SNAKE_CASE = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __SCREAMING_SNAKE_CASE = torch.ones( config.num_decoder_layers, config.num_heads, device=_a ) __SCREAMING_SNAKE_CASE = model.generate( config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=_a, return_dict_in_generate=_a, **_a, ) # We check the state of decoder_attentions and cross_attentions just from the last step __SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 ) @unittest.skip("Does not 
work on the tiny model as we keep hitting edge cases." ) def __lowerCAmelCase ( self ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def __lowerCAmelCase ( self ) -> List[Any]: __SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a ) __SCREAMING_SNAKE_CASE = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] __SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids # fmt: off __SCREAMING_SNAKE_CASE = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(_a, _a ) __SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) ) __SCREAMING_SNAKE_CASE = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํ”ผํ•ด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a ) self.assertEqual(_a, _a )
693
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase_ ( self ) -> Any: __lowerCamelCase : int = 1 __lowerCamelCase : List[str] = 3 __lowerCamelCase : Optional[int] = (32, 32) __lowerCamelCase : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) return image @property def lowercase_ ( self ) -> str: torch.manual_seed(0 ) __lowerCamelCase : Tuple = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=SCREAMING_SNAKE_CASE_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def lowercase_ ( self ) -> str: torch.manual_seed(0 ) __lowerCamelCase : Optional[Any] = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def lowercase_ ( self ) -> List[Any]: torch.manual_seed(0 ) __lowerCamelCase : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , ) return CLIPTextModel(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Dict: __lowerCamelCase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Optional[Any] = self.dummy_cond_unet_upscale __lowerCamelCase : Dict = DDPMScheduler() __lowerCamelCase : List[str] = DDIMScheduler(prediction_type='v_prediction' ) __lowerCamelCase : str = self.dummy_vae __lowerCamelCase : List[Any] = self.dummy_text_encoder __lowerCamelCase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __lowerCamelCase : List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCamelCase : Dict = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert('RGB' ).resize((64, 64) ) # make sure here that pndm scheduler skips prk __lowerCamelCase : Any = StableDiffusionUpscalePipeline( unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , max_noise_level=3_50 , ) __lowerCamelCase : Optional[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = 'A painting of a squirrel eating a burger' __lowerCamelCase : 
Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) __lowerCamelCase : List[Any] = sd_pipe( [prompt] , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , ) __lowerCamelCase : Optional[Any] = output.images __lowerCamelCase : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) __lowerCamelCase : int = sd_pipe( [prompt] , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=SCREAMING_SNAKE_CASE_ , )[0] __lowerCamelCase : Tuple = image[0, -3:, -3:, -1] __lowerCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] __lowerCamelCase : Dict = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) __lowerCamelCase : Optional[int] = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def lowercase_ ( self ) -> Dict: __lowerCamelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : List[Any] = self.dummy_cond_unet_upscale __lowerCamelCase : List[Any] = DDPMScheduler() __lowerCamelCase : Union[str, Any] = DDIMScheduler(prediction_type='v_prediction' ) __lowerCamelCase : Optional[Any] = self.dummy_vae __lowerCamelCase : str = self.dummy_text_encoder __lowerCamelCase : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __lowerCamelCase : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCamelCase : Optional[Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert('RGB' ).resize((64, 64) ) # make sure here that pndm scheduler skips prk __lowerCamelCase : str = StableDiffusionUpscalePipeline( unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , max_noise_level=3_50 , ) __lowerCamelCase : str = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = 'A painting of a squirrel eating a burger' __lowerCamelCase : List[Any] = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , ) __lowerCamelCase : Any = output.images assert image.shape[0] == 2 __lowerCamelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) __lowerCamelCase : List[Any] = sd_pipe( [prompt] , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , ) __lowerCamelCase : Union[str, Any] = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def lowercase_ ( self ) -> str: __lowerCamelCase : int = self.dummy_cond_unet_upscale __lowerCamelCase : Union[str, Any] = DDPMScheduler() __lowerCamelCase : Union[str, Any] = DDIMScheduler(prediction_type='v_prediction' ) __lowerCamelCase : Any = self.dummy_vae __lowerCamelCase : str = self.dummy_text_encoder __lowerCamelCase : Optional[Any] = 
CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __lowerCamelCase : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCamelCase : Union[str, Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert('RGB' ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 __lowerCamelCase : str = unet.half() __lowerCamelCase : int = text_encoder.half() # make sure here that pndm scheduler skips prk __lowerCamelCase : int = StableDiffusionUpscalePipeline( unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , max_noise_level=3_50 , ) __lowerCamelCase : List[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = 'A painting of a squirrel eating a burger' __lowerCamelCase : Optional[int] = torch.manual_seed(0 ) __lowerCamelCase : Dict = sd_pipe( [prompt] , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='np' , ).images __lowerCamelCase : int = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> Dict: __lowerCamelCase : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) __lowerCamelCase : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale' '/upsampled_cat.npy' ) __lowerCamelCase : Optional[Any] = 'stabilityai/stable-diffusion-x4-upscaler' __lowerCamelCase : List[str] = StableDiffusionUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) pipe.enable_attention_slicing() __lowerCamelCase : Tuple = 'a cat sitting on a park bench' __lowerCamelCase : Dict = torch.manual_seed(0 ) __lowerCamelCase : Tuple = pipe( prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='np' , ) __lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1E-3 def lowercase_ ( self ) -> List[str]: __lowerCamelCase : List[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) __lowerCamelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale' '/upsampled_cat_fp16.npy' ) __lowerCamelCase : Optional[int] = 'stabilityai/stable-diffusion-x4-upscaler' __lowerCamelCase : Dict = StableDiffusionUpscalePipeline.from_pretrained( SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) pipe.enable_attention_slicing() __lowerCamelCase : str = 'a cat sitting on a park bench' __lowerCamelCase : Dict = torch.manual_seed(0 ) __lowerCamelCase : int = pipe( prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='np' , ) __lowerCamelCase 
: List[str] = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5E-1 def lowercase_ ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCamelCase : List[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) __lowerCamelCase : Dict = 'stabilityai/stable-diffusion-x4-upscaler' __lowerCamelCase : Any = StableDiffusionUpscalePipeline.from_pretrained( SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __lowerCamelCase : Union[str, Any] = 'a cat sitting on a park bench' __lowerCamelCase : Union[str, Any] = torch.manual_seed(0 ) __lowerCamelCase : List[Any] = pipe( prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , output_type='np' , ) __lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
13
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BERT checkpoint in the original TensorFlow 1.x checkpoint format."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # Rewrite a PyTorch parameter name into the TF variable naming scheme.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
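A hypothetical invocation of the converter; the checkpoint and output paths are placeholders:

main([
    "--model_name", "bert-base-uncased",
    "--pytorch_model_path", "/path/to/pytorch_model.bin",
    "--tf_cache_dir", "/path/to/tf_checkpoints",
])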
693
0
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
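For reference, each line of the file passed as --correct_filename is semicolon-separated, as implied by the split(";") above; the file name and line below are hypothetical:

# <test file>;<test class>;<test method>;<corrected line>
# e.g. tests/models/bert/test_modeling_bert.py;BertModelIntegrationTest;test_inference;expected_slice = torch.tensor([...])
main("corrected_slices.txt")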
14
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _snake_case : str = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =["""input_values""", """padding_mask"""] def __init__( self, _a = 1, _a = 2_40_00, _a = 0.0, _a = None, _a = None, **_a, ) -> str: super().__init__(feature_size=_a, sampling_rate=_a, padding_value=_a, **_a ) __SCREAMING_SNAKE_CASE = chunk_length_s __SCREAMING_SNAKE_CASE = overlap @property def __lowerCAmelCase ( self ) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __lowerCAmelCase ( self ) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self, _a, _a = None, _a = False, _a = None, _a = None, _a = None, ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one." ) elif padding is None: # by default let's pad the inputs __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = bool( isinstance(_a, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(_a, dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_a, np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(_a, dtype=np.floataa ) elif isinstance(_a, np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_audio.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(_a ).T] # verify inputs are valid for idx, example in enumerate(_a ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = BatchFeature({"input_values": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: __SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: __SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length __SCREAMING_SNAKE_CASE = 
"max_length" else: __SCREAMING_SNAKE_CASE = input_values # normal padding on batch if padded_inputs is None: __SCREAMING_SNAKE_CASE = self.pad( _a, max_length=_a, truncation=_a, padding=_a, return_attention_mask=_a, ) if padding: __SCREAMING_SNAKE_CASE = padded_inputs.pop("attention_mask" ) __SCREAMING_SNAKE_CASE = [] for example in padded_inputs.pop("input_values" ): if self.feature_size == 1: __SCREAMING_SNAKE_CASE = example[..., None] input_values.append(example.T ) __SCREAMING_SNAKE_CASE = input_values if return_tensors is not None: __SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(_a ) return padded_inputs
693
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
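A small sanity check of the lazy pattern (illustrative; assumes transformers is installed):

import transformers.models.m2m_100 as m2m_100

tokenizer_cls = m2m_100.M2M100Tokenizer  # first attribute access triggers the real module import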
15
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
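A sketch of running the pipeline end to end; "google/ncsnpp-church-256" is one public score-SDE checkpoint (an assumption here, not referenced in the file), and any matching UNet2DModel/ScoreSdeVeScheduler pair should work:

from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
image = pipe(num_inference_steps=2000).images[0]
image.save("sample.png")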
693
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __A : Any = logging.get_logger(__name__) __A : List[Any] = 'โ–' __A : List[str] = {'vocab_file': 'sentencepiece.bpe.model'} __A : Union[str, Any] = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model' ), } } __A : Union[str, Any] = { 'facebook/nllb-200-distilled-600M': 1_0_2_4, } # fmt: off __A : Optional[int] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = ["input_ids", "attention_mask"] lowerCamelCase__ = [] lowerCamelCase__ = [] def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : str="</s>" , 
__lowerCamelCase : str="<s>" , __lowerCamelCase : Union[str, Any]="<unk>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Optional[Any]="<mask>" , __lowerCamelCase : Any=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Dict[str, Any]] = None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Any=False , **__lowerCamelCase : List[str] , ): # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs SCREAMING_SNAKE_CASE = legacy_behaviour super().__init__( bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenizer_file=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | 'โ–n' | 'โ–m' | 'โ–t' | 'โ–k' | 'โ–a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | 'โ–n' | 'โ–m' | 'โ–t' | 'โ–k' | 'โ–a' | 'โ–s' # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = len(self.sp_model ) SCREAMING_SNAKE_CASE = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__lowerCamelCase ) } SCREAMING_SNAKE_CASE = {v: k for k, v in self.lang_code_to_id.items()} SCREAMING_SNAKE_CASE = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()} SCREAMING_SNAKE_CASE = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else "eng_Latn" SCREAMING_SNAKE_CASE = self.lang_code_to_id[self._src_lang] SCREAMING_SNAKE_CASE = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Dict ): SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() return state def __setstate__( self : Tuple , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def _snake_case ( self : Any ): return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _snake_case ( self : Optional[int] ): return self._src_lang @src_lang.setter def _snake_case ( self : List[Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [1] * len(self.prefix_tokens ) SCREAMING_SNAKE_CASE = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCamelCase )) + ([0] * len(__lowerCamelCase )) + suffix_ones def _snake_case ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Any , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] , __lowerCamelCase : Optional[str] , **__lowerCamelCase : Union[str, Any] ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) SCREAMING_SNAKE_CASE = src_lang SCREAMING_SNAKE_CASE = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tgt_lang_id return inputs def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self : Any , __lowerCamelCase : str ): return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def _snake_case ( self : List[str] , __lowerCamelCase : 
str ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(__lowerCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self : Optional[Any] , __lowerCamelCase : Dict ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self : int , __lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ).replace(__lowerCamelCase , " " ).strip() return out_string def _snake_case ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,) def _snake_case ( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : str = "eng_Latn" , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : str = "fra_Latn" , **__lowerCamelCase : Optional[int] , ): SCREAMING_SNAKE_CASE = src_lang SCREAMING_SNAKE_CASE = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Any ): return self.set_src_lang_special_tokens(self.src_lang ) def _snake_case ( self : Optional[Any] ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _snake_case ( self : List[Any] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = self.lang_code_to_id[src_lang] if self.legacy_behaviour: SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE = [self.cur_lang_code] SCREAMING_SNAKE_CASE = [self.eos_token_id] def _snake_case ( self : Optional[Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = self.lang_code_to_id[lang] if self.legacy_behaviour: SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE = [self.cur_lang_code] SCREAMING_SNAKE_CASE = [self.eos_token_id]
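Typical translation-style usage of the tokenizer above, via the public NllbTokenizer class this implementation corresponds to (the model id is the one referenced in the vocab map earlier in the file):

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tokenizer("Hello world", return_tensors="pt")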
16
def solution(limit: int = 4_000_000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed ``limit``."""
    even_fibs = []
    a, b = 0, 1
    while b <= limit:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
693
0
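An independent cross-check of the snippet above, using the identity that the even Fibonacci numbers satisfy e(n) = 4*e(n-1) + e(n-2) (2, 8, 34, 144, ...), so the odd terms never need generating. A minimal sketch: the 4,613,732 constant is the well-known answer for the 4,000,000 bound; the function name is illustrative.

def even_fib_sum(limit: int = 4_000_000) -> int:
    # Walk only the even Fibonacci numbers via their own recurrence.
    prev, cur = 0, 2
    total = 0
    while cur <= limit:
        total += cur
        prev, cur = cur, 4 * cur + prev
    return total

assert even_fib_sum() == 4_613_732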
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path UpperCAmelCase_ : Optional[Any] = [ {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''}, {'''dataset''': '''snli''', '''config_name''': '''plain_text'''}, {'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''}, {'''dataset''': '''wiki40b''', '''config_name''': '''en'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''}, {'''dataset''': '''natural_questions''', '''config_name''': '''default'''}, ] def __SCREAMING_SNAKE_CASE ( a__ : str=True ) -> List[Any]: if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) ) class lowerCamelCase_ ( _lowercase ): _lowercase : Optional[int] = None _lowercase : str = None def lowerCAmelCase_ ( self : Dict , __A : Optional[int] , __A : Optional[Any] ): with TemporaryDirectory() as tmp_dir: __A : List[Any] = dataset_module_factory(__A , cache_dir=__A ) __A : Tuple = import_main_class(dataset_module.module_path , dataset=__A ) __A : DatasetBuilder = builder_cls( cache_dir=__A , config_name=__A , hash=dataset_module.hash , ) __A : List[Any] = """/""".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ), config.DATASET_INFO_FILENAME, ] ) __A : Union[str, Any] = cached_path(__A , cache_dir=__A ) self.assertTrue(os.path.exists(__A ) ) @pytest.mark.integration def __SCREAMING_SNAKE_CASE ( a__ : Dict ) -> Optional[Any]: __A : Optional[Any] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple""" __A : Union[str, Any] = dataset_module_factory("""wikipedia""" ,cache_dir=a__ ) __A : List[Any] = import_main_class(dataset_module.module_path ) __A : DatasetBuilder = builder_cls( cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam __A : Any = None builder_instance.download_and_prepare() __A : Union[str, Any] = builder_instance.as_dataset() assert ds @pytest.mark.integration def __SCREAMING_SNAKE_CASE ( a__ : List[str] ) -> List[str]: __A : Tuple = dataset_module_factory("""wikipedia""" ,cache_dir=a__ ) __A : str = import_main_class(dataset_module.module_path ,dataset=a__ ) __A : DatasetBuilder = builder_cls( 
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,) __A : Optional[int] = builder_instance.as_streaming_dataset() assert ds assert isinstance(a__ ,a__ ) assert "train" in ds assert isinstance(ds["""train"""] ,a__ ) assert next(iter(ds["""train"""] ) )
17
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2): for each element, scan the rest of the list for a larger one."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same O(n^2) idea, written with enumerate/slicing instead of index arithmetic."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n): scan right-to-left, keeping a stack of strictly decreasing candidates."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
693
0
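A randomized sanity check that the stack-based variant above matches the plain O(n^2) definition. A self-contained sketch under the same convention as the snippet, namely that -1 marks "no greater element to the right"; both helper names are illustrative.

import random

def reference(arr):
    # Direct O(n^2) definition of "next greater element to the right".
    return [next((y for y in arr[i + 1 :] if y > x), -1) for i, x in enumerate(arr)]

def with_stack(arr):
    # Monotonic stack: scan right-to-left, keep candidates in decreasing order.
    result = [-1] * len(arr)
    stack = []
    for i in reversed(range(len(arr))):
        while stack and stack[-1] <= arr[i]:
            stack.pop()
        if stack:
            result[i] = stack[-1]
        stack.append(arr[i])
    return result

for _ in range(100):
    data = [random.randint(-10, 10) for _ in range(random.randint(0, 12))]
    assert reference(data) == with_stack(data)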
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
18
from typing import Any class __SCREAMING_SNAKE_CASE : def __init__( self, _a ) -> Any: __SCREAMING_SNAKE_CASE = data __SCREAMING_SNAKE_CASE = None def __repr__( self ) -> str: return f'''Node({self.data})''' class __SCREAMING_SNAKE_CASE : def __init__( self ) -> Tuple: __SCREAMING_SNAKE_CASE = None def __iter__( self ) -> Any: __SCREAMING_SNAKE_CASE = self.head while node: yield node.data __SCREAMING_SNAKE_CASE = node.next def __len__( self ) -> int: return sum(1 for _ in self ) def __repr__( self ) -> str: return "->".join([str(_a ) for item in self] ) def __getitem__( self, _a ) -> Any: if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self, _a, _a ) -> None: if not 0 <= index < len(self ): raise ValueError("list index out of range." ) __SCREAMING_SNAKE_CASE = self.head for _ in range(_a ): __SCREAMING_SNAKE_CASE = current.next __SCREAMING_SNAKE_CASE = data def __lowerCAmelCase ( self, _a ) -> None: self.insert_nth(len(self ), _a ) def __lowerCAmelCase ( self, _a ) -> None: self.insert_nth(0, _a ) def __lowerCAmelCase ( self, _a, _a ) -> None: if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) __SCREAMING_SNAKE_CASE = Node(_a ) if self.head is None: __SCREAMING_SNAKE_CASE = new_node elif index == 0: __SCREAMING_SNAKE_CASE = self.head # link new_node to head __SCREAMING_SNAKE_CASE = new_node else: __SCREAMING_SNAKE_CASE = self.head for _ in range(index - 1 ): __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = new_node def __lowerCAmelCase ( self ) -> None: # print every node data print(self ) def __lowerCAmelCase ( self ) -> Any: return self.delete_nth(0 ) def __lowerCAmelCase ( self ) -> Any: # delete from tail return self.delete_nth(len(self ) - 1 ) def __lowerCAmelCase ( self, _a = 0 ) -> Any: if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." ) __SCREAMING_SNAKE_CASE = self.head # default first node if index == 0: __SCREAMING_SNAKE_CASE = self.head.next else: __SCREAMING_SNAKE_CASE = self.head for _ in range(index - 1 ): __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = temp.next __SCREAMING_SNAKE_CASE = temp.next.next return delete_node.data def __lowerCAmelCase ( self ) -> bool: return self.head is None def __lowerCAmelCase ( self ) -> None: __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = self.head while current: # Store the current node's next node. __SCREAMING_SNAKE_CASE = current.next # Make the current node's next point backwards __SCREAMING_SNAKE_CASE = prev # Make the previous node be the current node __SCREAMING_SNAKE_CASE = current # Make the current node the next node (to progress iteration) __SCREAMING_SNAKE_CASE = next_node # Return prev in order to put the head at the end __SCREAMING_SNAKE_CASE = prev def _A ( ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = LinkedList() assert linked_list.is_empty() is True assert str(__snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(__snake_case ) == i linked_list.insert_nth(__snake_case , i + 1 ) assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__snake_case ) == 9 assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __SCREAMING_SNAKE_CASE = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) ) def _A ( ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = [ -9, 100, Node(7734_5112 ), "dlrow olleH", 7, 5555, 0, -1_9_2.5_5_5_5_5, "Hello, world!", 7_7.9, Node(10 ), None, None, 1_2.2_0, ] __SCREAMING_SNAKE_CASE = LinkedList() for i in test_input: linked_list.insert_tail(__snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __SCREAMING_SNAKE_CASE = linked_list.delete_head() assert result == -9 assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __SCREAMING_SNAKE_CASE = linked_list.delete_tail() assert result == 1_2.2 assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __SCREAMING_SNAKE_CASE = linked_list.delete_nth(10 ) assert result is None assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!" 
) ) assert ( str(__snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__snake_case ) assert ( str(__snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _A ( ) -> Union[str, Any]: """simple docstring""" from doctest import testmod testmod() __SCREAMING_SNAKE_CASE = LinkedList() linked_list.insert_head(input("Inserting 1st at head " ).strip() ) linked_list.insert_head(input("Inserting 2nd at head " ).strip() ) print("\nPrint list:" ) linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() ) linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() ) print("\nPrint list:" ) linked_list.print_list() print("\nDelete head" ) linked_list.delete_head() print("Delete tail" ) linked_list.delete_tail() print("\nPrint list:" ) linked_list.print_list() print("\nReverse linked list" ) linked_list.reverse() print("\nPrint list:" ) linked_list.print_list() print("\nString representation of linked list:" ) print(__snake_case ) print("\nReading/changing Node data using indexing:" ) print(f'''Element at Position 1: {linked_list[1]}''' ) __SCREAMING_SNAKE_CASE = input("Enter New Value: " ).strip() print("New list:" ) print(__snake_case ) print(f'''length of linked_list is : {len(__snake_case )}''' ) if __name__ == "__main__": main()
693
0
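The in-place `reverse` in the linked-list snippet above is the classic three-pointer walk. Here is the same idea isolated on a minimal node type, independent of the class above (a sketch; the names are illustrative).

class _Node:
    def __init__(self, data, nxt=None):
        self.data, self.next = data, nxt

def reverse(head):
    # Re-point each node backwards, then advance; RHS is evaluated before assignment.
    prev = None
    while head:
        head.next, prev, head = prev, head, head.next
    return prev

head = reverse(_Node(1, _Node(2, _Node(3))))
out = []
while head:
    out.append(head.data)
    head = head.next
assert out == [3, 2, 1]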
"""simple docstring""" # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path _a = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) _a = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} _a = """zero2""" _a = """zero3""" _a = [ZEROa, ZEROa] def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Dict: """simple docstring""" _UpperCamelCase = parameterized.to_safe_name('''_'''.join(str(__snake_case ) for x in param.args ) ) return F'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test _a = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class _UpperCAmelCase( lowerCamelCase ): @parameterized.expand(__a , name_func=__a) def UpperCAmelCase ( self , __a , __a) -> List[str]: '''simple docstring''' self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) @require_torch_multi_gpu @parameterized.expand(__a , name_func=__a) def UpperCAmelCase ( self , __a , __a) -> int: '''simple docstring''' self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) @parameterized.expand(__a , name_func=__a) def UpperCAmelCase ( self , __a , __a) -> List[str]: '''simple docstring''' self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) @require_torch_multi_gpu @parameterized.expand(__a , name_func=__a) def UpperCAmelCase ( self , __a , __a) -> Dict: '''simple docstring''' self.run_and_check( stage=__a , model=__a , distributed=__a , fpaa=__a , ) def UpperCAmelCase ( self , __a) -> Tuple: '''simple docstring''' # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def UpperCAmelCase ( self , __a , __a , __a = 10 , __a = True , __a = True , __a = True , ) -> Dict: '''simple docstring''' _UpperCamelCase = models[model] _UpperCamelCase = self.run_trainer( stage=__a , model_name=__a , eval_steps=__a , num_train_epochs=1 , distributed=__a , fpaa=__a , ) self.do_checks(__a) return output_dir def UpperCAmelCase ( self , __a , __a , __a = 10 , __a = 1 , __a = True , __a = True , ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir('''./xxx''' , after=__a) _UpperCamelCase = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(__a)} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length 
--freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['''--fp16''']) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files _UpperCamelCase = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() _UpperCamelCase = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] _UpperCamelCase = self.get_launcher(__a) _UpperCamelCase = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__a , env=self.get_env()) return output_dir def UpperCAmelCase ( self , __a=False) -> Tuple: '''simple docstring''' # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) _UpperCamelCase = min(2 , get_gpu_count()) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
19
import argparse
import json

from tqdm import tqdm


def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
693
0
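Shape of the raw DPR records the parser above expects, as a runnable sketch. The field names (`question`, `positive_ctxs`, `title`) follow the snippet; the sample contents and file name are hypothetical.

import json

sample = [
    {
        "question": "who wrote the declaration of independence",
        "positive_ctxs": [{"title": "Thomas Jefferson"}, {"title": "Declaration of Independence"}],
    }
]
with open("biencoder-sample.json", "w") as f:
    json.dump(sample, f)
# Running the script with --src_path biencoder-sample.json would then write one question
# per line to the evaluation set and tab-separated titles to the gold-data file.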
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
20
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of all almost-equilateral Heronian triangles (sides a, a, a +/- 1
    with integral area) whose perimeter does not exceed ``max_perimeter``, generated by a
    Pell-like recurrence (Project Euler problem 94)."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
693
0
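A slow cross-check of the closed recurrence above for a small bound: enumerate the "almost equilateral" triangles (sides a, a, a +/- 1) directly and keep those with integral area. A sketch using Heron's formula, with exactness tested via integer square roots; the function name and the 984 constant (perimeters 16 + 50 + 196 + 722) are derived here, not taken from the dataset row.

import math

def brute_force(max_perimeter: int) -> int:
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for c in (a - 1, a + 1):
            p = 2 * a + c
            if p > max_perimeter:
                continue
            # Heron's formula: 16 * area^2 = p * (p - 2a)^2 * (p - 2c) for sides (a, a, c).
            sq = p * (p - 2 * a) * (p - 2 * a) * (p - 2 * c)
            r = math.isqrt(sq)
            if sq > 0 and r * r == sq and r % 4 == 0:  # area = r / 4 must be a positive integer
                total += p
    return total

assert brute_force(1_000) == 984  # triangles (5,5,6), (17,17,16), (65,65,66), (241,241,240)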
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
21
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _snake_case , _snake_case , _snake_case : List[Any] = False, False, False @dataclass class __SCREAMING_SNAKE_CASE : SCREAMING_SNAKE_CASE__ =None SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =None # Automatically constructed SCREAMING_SNAKE_CASE__ ="dict" SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __call__( self ) -> Optional[int]: return self.pa_type def __lowerCAmelCase ( self, _a ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(_a, _a ): return {"bytes": None, "path": value} elif isinstance(_a, _a ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes __SCREAMING_SNAKE_CASE = BytesIO() sf.write(_a, value["array"], value["sampling_rate"], format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) __SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: __SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67 __SCREAMING_SNAKE_CASE = BytesIO(bytes() ) sf.write(_a, _a, value["sampling_rate"], format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def __lowerCAmelCase ( self, _a, _a = None ) -> dict: if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." 
) from err __SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: __SCREAMING_SNAKE_CASE = token_per_repo_id or {} __SCREAMING_SNAKE_CASE = path.split("::" )[-1] try: __SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"] __SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id] except (ValueError, KeyError): __SCREAMING_SNAKE_CASE = None with xopen(_a, "rb", use_auth_token=_a ) as f: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a ) __SCREAMING_SNAKE_CASE = array.T if self.mono: __SCREAMING_SNAKE_CASE = librosa.to_mono(_a ) if self.sampling_rate and self.sampling_rate != sampling_rate: __SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate ) __SCREAMING_SNAKE_CASE = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." ) return { "bytes": Value("binary" ), "path": Value("string" ), } def __lowerCAmelCase ( self, _a ) -> pa.StructArray: if pa.types.is_string(storage.type ): __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): __SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: __SCREAMING_SNAKE_CASE = storage.field("bytes" ) else: __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: __SCREAMING_SNAKE_CASE = storage.field("path" ) else: __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() ) return array_cast(_a, self.pa_type ) def __lowerCAmelCase ( self, _a ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(_a ): with xopen(_a, "rb" ) as f: __SCREAMING_SNAKE_CASE = f.read() return bytes_ __SCREAMING_SNAKE_CASE = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) __SCREAMING_SNAKE_CASE = pa.array( [os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], 
["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(_a, self.pa_type )
693
0
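A typical round-trip through the `Audio` feature shown in the record above: encode an in-memory waveform into the bytes/path struct, then decode it back on access. A sketch with a synthetic one-second silent signal; it assumes `soundfile` (for encoding) and `librosa` (for decoding) are installed, per the snippet's own import checks.

import numpy as np
from datasets import Audio, Dataset

wave = {"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000}
ds = Dataset.from_dict({"audio": [wave]}).cast_column("audio", Audio(sampling_rate=16_000))
decoded = ds[0]["audio"]  # decoded back to a dict with "path", "array", "sampling_rate"
assert decoded["sampling_rate"] == 16_000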
'''simple docstring''' import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _snake_case : List[str] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class A ( _a ,unittest.TestCase ): lowercase_ = XLMProphetNetTokenizer lowercase_ = False lowercase_ = True def __lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _a = XLMProphetNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : Tuple ) -> List[str]: """simple docstring""" _a = '''[PAD]''' _a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" _a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''[PAD]''' ) self.assertEqual(vocab_keys[1] , '''[CLS]''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(lowerCAmelCase_ ) , 10_12 ) def __lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_12 ) def __lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" _a = XLMProphetNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) _a = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase_ , ['''โ–This''', '''โ–is''', '''โ–a''', '''โ–t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) _a = tokenizer.tokenize('''I was born in 92000, and this is falsรฉ.''' ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''รฉ''', '''.''', ] , ) _a = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) _a = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''[UNK]''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''[UNK]''', '''.''', ] , ) @cached_property def __lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' ) @slow def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" _a = '''Hello World!''' _a = [3_53_89, 66_72, 49, 
2] self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) ) @slow def __lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" _a = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase_ , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
22
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =(IPNDMScheduler,) SCREAMING_SNAKE_CASE__ =(("""num_inference_steps""", 50),) def __lowerCAmelCase ( self, **_a ) -> str: __SCREAMING_SNAKE_CASE = {"num_train_timesteps": 10_00} config.update(**_a ) return config def __lowerCAmelCase ( self, _a=0, **_a ) -> List[Any]: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a ) __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] if time_step is None: __SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) __SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a ) new_scheduler.set_timesteps(_a ) # copy over dummy past residuals __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self ) -> str: pass def __lowerCAmelCase ( self, _a=0, **_a ) -> int: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals (must be after setting timesteps) __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] if time_step is None: __SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) __SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a ) # copy over dummy past residuals new_scheduler.set_timesteps(_a ) # copy over dummy past residual (must be after setting timesteps) __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self, **_a ) -> Tuple: __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] 
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a ) __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter scheduler.set_timesteps(_a ) for i, t in enumerate(scheduler.timesteps ): __SCREAMING_SNAKE_CASE = model(_a, _a ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample for i, t in enumerate(scheduler.timesteps ): __SCREAMING_SNAKE_CASE = model(_a, _a ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample return sample def __lowerCAmelCase ( self ) -> Optional[int]: __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs ) __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a ) for scheduler_class in self.scheduler_classes: __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = self.dummy_sample __SCREAMING_SNAKE_CASE = 0.1 * sample if num_inference_steps is not None and hasattr(_a, "set_timesteps" ): scheduler.set_timesteps(_a ) elif num_inference_steps is not None and not hasattr(_a, "set_timesteps" ): __SCREAMING_SNAKE_CASE = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __SCREAMING_SNAKE_CASE = dummy_past_residuals[:] __SCREAMING_SNAKE_CASE = scheduler.timesteps[5] __SCREAMING_SNAKE_CASE = scheduler.timesteps[6] __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def __lowerCAmelCase ( self ) -> str: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_a, time_step=_a ) def __lowerCAmelCase ( self ) -> Optional[Any]: for t, num_inference_steps in zip([1, 5, 10], [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_a, time_step=_a ) def __lowerCAmelCase ( self ) -> Any: __SCREAMING_SNAKE_CASE = self.full_loop() __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 2_54_05_29 ) < 10
693
0
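The denoising loop the IPNDM scheduler tests above exercise, in isolation. A sketch: the "residual" below is a stand-in for a real noise-prediction model's output, and the tensor shape is arbitrary.

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = 0.1 * sample  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])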
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case__ : Union[str, Any] = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class _a : """simple docstring""" A_ = PegasusConfig A_ = {} A_ = """gelu""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ) -> Optional[int]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = seq_length UpperCamelCase_ = is_training UpperCamelCase_ = use_labels UpperCamelCase_ = vocab_size UpperCamelCase_ = hidden_size UpperCamelCase_ = num_hidden_layers UpperCamelCase_ = num_attention_heads UpperCamelCase_ = intermediate_size UpperCamelCase_ = hidden_dropout_prob UpperCamelCase_ = attention_probs_dropout_prob UpperCamelCase_ = max_position_embeddings UpperCamelCase_ = eos_token_id UpperCamelCase_ = pad_token_id UpperCamelCase_ = bos_token_id def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) UpperCamelCase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) UpperCamelCase_ = np.concatenate([input_ids, eos_tensor] , axis=1 ) UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCamelCase_ = prepare_pegasus_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: UpperCamelCase_ = 20 UpperCamelCase_ = model_class_name(_UpperCAmelCase ) UpperCamelCase_ = model.encode(inputs_dict['input_ids'] ) UpperCamelCase_ , UpperCamelCase_ = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) UpperCamelCase_ = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) UpperCamelCase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , 
(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCamelCase_ = model.decode( decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) UpperCamelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) UpperCamelCase_ = model.decode( decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , ) UpperCamelCase_ = model.decode(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = 20 UpperCamelCase_ = model_class_name(_UpperCAmelCase ) UpperCamelCase_ = model.encode(inputs_dict['input_ids'] ) UpperCamelCase_ , UpperCamelCase_ = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) UpperCamelCase_ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCamelCase_ = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCamelCase_ = model.decode( decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) UpperCamelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) UpperCamelCase_ = model.decode( decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) UpperCamelCase_ = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase ) UpperCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ): if attention_mask is None: UpperCamelCase_ = np.not_equal(__lowercase , config.pad_token_id).astype(np.inta) if decoder_attention_mask is None: UpperCamelCase_ = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id).astype(np.inta), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class _a ( UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) A_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () A_ = True A_ = False A_ = False A_ = False def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = FlaxPegasusModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ , 
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Tuple: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = model_class(_UpperCAmelCase ) @jax.jit def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ): return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) with self.subTest('JIT Enabled' ): UpperCamelCase_ = encode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): UpperCamelCase_ = encode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase_ = model_class(_UpperCAmelCase ) UpperCamelCase_ = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) UpperCamelCase_ = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return model.decode( decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , ) with self.subTest('JIT Enabled' ): UpperCamelCase_ = decode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): UpperCamelCase_ = decode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _UpperCAmelCase ( self ) -> int: for model_class_name in self.all_model_classes: UpperCamelCase_ = model_class_name.from_pretrained('google/pegasus-large' , from_pt=_UpperCAmelCase ) UpperCamelCase_ = np.ones((1, 1) ) UpperCamelCase_ = model(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' ) UpperCamelCase_ = PegasusTokenizer.from_pretrained('google/pegasus-xsum' ) UpperCamelCase_ = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] UpperCamelCase_ = [ 'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.', 'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.', ] UpperCamelCase_ = tokenizer(_UpperCAmelCase , return_tensors='np' , truncation=_UpperCAmelCase , max_length=512 , padding=_UpperCAmelCase ) UpperCamelCase_ = model.generate(**_UpperCAmelCase , num_beams=2 ).sequences UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) assert tgt_text == decoded
23
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d usable as an exponent
        exp += 1
    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
693
0
'''simple docstring''' import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]: '''simple docstring''' __snake_case = old_name if "patch_embed" in old_name: __snake_case , __snake_case , __snake_case = old_name.split('''.''' ) if layer == "0": __snake_case = old_name.replace('''0''' , '''convolution1''' ) elif layer == "1": __snake_case = old_name.replace('''1''' , '''batchnorm_before''' ) elif layer == "3": __snake_case = old_name.replace('''3''' , '''convolution2''' ) else: __snake_case = old_name.replace('''4''' , '''batchnorm_after''' ) if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ): __snake_case = R'''\b\d{2}\b''' if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ): __snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group() else: __snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group() if int(match[0] ) < 6: __snake_case = old_name.replace(_lowerCamelCase , '''''' ) __snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] ) __snake_case = '''intermediate_stages.''' + trimmed_name else: __snake_case = old_name.replace(_lowerCamelCase , '''''' ) if int(match[2] ) < num_meta4D_last_stage: __snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] ) else: __snake_case = str(int(match[2] ) - num_meta4D_last_stage ) __snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index ) if "norm1" in old_name: __snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' ) elif "norm2" in old_name: __snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' ) elif "fc1" in old_name: __snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' ) elif "fc2" in old_name: __snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' ) __snake_case = '''last_stage.''' + trimmed_name elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ): __snake_case = old_name.replace('''network''' , '''intermediate_stages''' ) if "fc" in new_name: __snake_case = new_name.replace('''fc''' , '''convolution''' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): __snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): __snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' ) if "proj" in new_name: __snake_case = new_name.replace('''proj''' , '''projection''' ) if "dist_head" in new_name: __snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' ) elif "head" in new_name: __snake_case = new_name.replace('''head''' , '''classifier''' ) elif "patch_embed" in new_name: __snake_case = '''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": __snake_case = new_name.replace('''norm''' , '''layernorm''' ) __snake_case = '''efficientformer.''' + new_name else: __snake_case = '''efficientformer.encoder.''' + new_name return new_name def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]: '''simple docstring''' for 
key in checkpoint.copy().keys(): __snake_case = checkpoint.pop(_lowerCamelCase ) __snake_case = val return checkpoint def _UpperCamelCase ()-> Tuple: '''simple docstring''' __snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]: '''simple docstring''' __snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model'''] __snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase ) __snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase ) __snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] ) __snake_case = config.depths[-1] - config.num_meta3d_blocks + 1 __snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() __snake_case = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image __snake_case = prepare_img() __snake_case = 2_56 __snake_case = 2_24 __snake_case = EfficientFormerImageProcessor( size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , ) __snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values # original processing pipeline __snake_case = Compose( [ Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ), CenterCrop(_lowerCamelCase ), ToTensor(), Normalize(_lowerCamelCase , _lowerCamelCase ), ] ) __snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 ) assert torch.allclose(_lowerCamelCase , _lowerCamelCase ) __snake_case = model(_lowerCamelCase ) __snake_case = outputs.logits __snake_case = (1, 10_00) if "l1" in model_name: __snake_case = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 ) assert logits.shape == expected_shape elif "l3" in model_name: __snake_case = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 ) assert logits.shape == expected_shape elif "l7" in model_name: __snake_case = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( f'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7''' ) # Save Checkpoints Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' ) processor.save_pretrained(_lowerCamelCase ) print(f'''Processor successfully saved at {pytorch_dump_path}''' ) if push_to_hub: print('''Pushing model to the hub...''' ) model.push_to_hub( repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , ) processor.push_to_hub( repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , ) if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to EfficientFormer pytorch checkpoint.''', ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for EfficientFormer model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) parser.set_defaults(push_to_hub=True) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
24
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a (ksize x ksize) Gabor filter kernel."""
    # ksize must be odd so the kernel has a well-defined center
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
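A small, self-contained check of the kernel builder alone (illustrative parameter values; no OpenCV image I/O required):

import numpy as np

kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
print(kernel.shape)                      # (11, 11): an even ksize is bumped to the next odd size
print(bool(np.isfinite(kernel).all()))   # True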
693
0
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCamelCase ( __A ): '''simple docstring''' def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = parent SCREAMING_SNAKE_CASE : Any = batch_size SCREAMING_SNAKE_CASE : Optional[int] = seq_length SCREAMING_SNAKE_CASE : List[Any] = is_training SCREAMING_SNAKE_CASE : int = use_input_mask SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids SCREAMING_SNAKE_CASE : str = use_labels SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : List[Any] = hidden_size SCREAMING_SNAKE_CASE : str = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Tuple = intermediate_size SCREAMING_SNAKE_CASE : Optional[int] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE : List[str] = type_vocab_size SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE : Tuple = num_labels SCREAMING_SNAKE_CASE : Tuple = num_choices SCREAMING_SNAKE_CASE : Optional[Any] = scope def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Union[str, Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase ( self : Dict ) -> str: """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a ) SCREAMING_SNAKE_CASE : Optional[Any] = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model( a , attention_mask=a , start_positions=a , end_positions=a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.num_labels SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : str = self.num_labels SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = self.num_choices SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE : Optional[Any] = model( a , attention_mask=a , labels=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs() ((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs SCREAMING_SNAKE_CASE : int = {"input_ids": 
input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _UpperCamelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ =( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True def __UpperCamelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self ) SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 ) def __UpperCamelCase ( self : List[Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a ) def __UpperCamelCase ( self : Tuple ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a ) def __UpperCamelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a ) def __UpperCamelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a ) def __UpperCamelCase ( self : str ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a ) def __UpperCamelCase ( self : List[Any] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a ) @slow def __UpperCamelCase ( self : int ) -> Any: """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a ) self.assertIsNotNone(a ) @slow @require_torch_gpu def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return SCREAMING_SNAKE_CASE : Union[str, Any] = True SCREAMING_SNAKE_CASE : Any = model_class(config=a ) SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace( a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a , os.path.join(a , "traced_model.pt" ) ) SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a ) loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) ) @require_torch class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCamelCase ( self : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0] SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , a ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
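The integration test at the end corresponds to this minimal inference snippet (checkpoint and token ids are taken from the test itself):

import torch
from transformers import DistilBertModel

model = DistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    hidden_states = model(input_ids, attention_mask=attention_mask)[0]
print(hidden_states.shape)  # torch.Size([1, 11, 768])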
25
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence (2, 3, 7, 43, ...)."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
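The recurrence is s(n) = s(n-1)^2 - s(n-1) + 1 with s(1) = 2, which the code expresses as lower * upper + 1. The first few terms, for a quick check:

# s(2) = 2*1 + 1 = 3, s(3) = 3*2 + 1 = 7, s(4) = 7*6 + 1 = 43, s(5) = 43*42 + 1 = 1807
for i in range(1, 6):
    print(i, sylvester(i))  # 1 2 / 2 3 / 3 7 / 4 43 / 5 1807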
693
0
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Tuple = AutoencoderKL lowercase__: int = '''sample''' lowercase__: Dict = 1e-2 @property def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[int] = 4 __snake_case : Optional[Any] = 3 __snake_case : Optional[Any] = (32, 32) __snake_case : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ ) return {"sample": image} @property def lowercase__ ( self : Any ) -> str: """simple docstring""" return (3, 32, 32) @property def lowercase__ ( self : str ) -> Dict: """simple docstring""" return (3, 32, 32) def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __snake_case : Union[str, Any] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" pass def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" pass @unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" ) def lowercase__ ( self : Any ) -> List[Any]: """simple docstring""" __snake_case , __snake_case : List[Any] = self.prepare_init_args_and_inputs_for_common() __snake_case : int = self.model_class(**__magic_name__ ) model.to(__magic_name__ ) assert not model.is_gradient_checkpointing and model.training __snake_case : Any = model(**__magic_name__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __snake_case : List[str] = torch.randn_like(__magic_name__ ) __snake_case : Dict = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __snake_case : Optional[int] = self.model_class(**__magic_name__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__magic_name__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __snake_case : Dict = model_a(**__magic_name__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __snake_case : Optional[Any] = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __snake_case : Any = dict(model.named_parameters() ) __snake_case : Any = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case , __snake_case : Optional[Any] = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=__magic_name__ ) self.assertIsNotNone(__magic_name__ ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__magic_name__ ) __snake_case : Tuple = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ) __snake_case : Optional[int] = model.to(__magic_name__ ) model.eval() if torch_device == "mps": __snake_case : Optional[Any] = torch.manual_seed(0 ) else: __snake_case : Optional[int] = torch.Generator(device=__magic_name__ ).manual_seed(0 ) __snake_case : Optional[Any] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __snake_case : Any = image.to(__magic_name__ ) with torch.no_grad(): __snake_case : Any = model(__magic_name__ , sample_posterior=__magic_name__ , generator=__magic_name__ ).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __snake_case : Optional[int] = torch.tensor( [ -4.0_078E-01, -3.8_323E-04, -1.2_681E-01, -1.1_462E-01, 2.0_095E-01, 1.0_893E-01, -8.8_247E-02, -3.0_361E-01, -9.8_644E-03, ] ) elif torch_device == "cpu": __snake_case : List[Any] = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: __snake_case : str = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1E-2 ) ) @slow class _A ( unittest.TestCase ): def lowercase__ ( self : str , __magic_name__ : Tuple , __magic_name__ : str ) -> Optional[Any]: """simple docstring""" return f'''gaussian_noise_s={seed}_shape={"_".join([str(__magic_name__ ) for s in shape] )}.npy''' def lowercase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Tuple , __magic_name__ : str=0 , __magic_name__ : Tuple=(4, 3, 5_12, 5_12) , __magic_name__ : Optional[int]=False ) -> Optional[Any]: """simple docstring""" __snake_case : int = torch.floataa if fpaa else torch.floataa __snake_case : str = torch.from_numpy(load_hf_numpy(self.get_file_format(__magic_name__ , __magic_name__ ) ) ).to(__magic_name__ ).to(__magic_name__ ) return image def lowercase__ ( self : Dict , __magic_name__ : Any="CompVis/stable-diffusion-v1-4" , __magic_name__ : List[Any]=False ) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = """fp16""" if fpaa else None __snake_case : Tuple = torch.floataa if fpaa else torch.floataa __snake_case : Dict = AutoencoderKL.from_pretrained( __magic_name__ , subfolder="""vae""" , torch_dtype=__magic_name__ , revision=__magic_name__ , ) model.to(__magic_name__ ).eval() return model def lowercase__ ( self : Tuple , __magic_name__ : Tuple=0 ) -> int: """simple docstring""" if torch_device == "mps": return torch.manual_seed(__magic_name__ ) return torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case : List[str] = self.get_sd_vae_model() __snake_case : int = self.get_sd_image(__magic_name__ ) __snake_case : Tuple = self.get_generator(__magic_name__ ) with torch.no_grad(): __snake_case : List[str] = model(__magic_name__ , generator=__magic_name__ , sample_posterior=__magic_name__ ).sample assert sample.shape == image.shape __snake_case : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() __snake_case : str = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(__magic_name__ , __magic_name__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def lowercase__ ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __snake_case : List[Any] = 
self.get_sd_vae_model(fpaa=__magic_name__ ) __snake_case : int = self.get_sd_image(__magic_name__ , fpaa=__magic_name__ ) __snake_case : Union[str, Any] = self.get_generator(__magic_name__ ) with torch.no_grad(): __snake_case : List[str] = model(__magic_name__ , generator=__magic_name__ , sample_posterior=__magic_name__ ).sample assert sample.shape == image.shape __snake_case : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu() __snake_case : int = torch.tensor(__magic_name__ ) assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __snake_case : int = self.get_sd_vae_model() __snake_case : List[Any] = self.get_sd_image(__magic_name__ ) with torch.no_grad(): __snake_case : str = model(__magic_name__ ).sample assert sample.shape == image.shape __snake_case : int = sample[-1, -2:, -2:, :2].flatten().float().cpu() __snake_case : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(__magic_name__ , __magic_name__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def lowercase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Optional[Any]: """simple docstring""" __snake_case : List[str] = self.get_sd_vae_model() __snake_case : Tuple = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) ) with torch.no_grad(): __snake_case : Dict = model.decode(__magic_name__ ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] __snake_case : int = sample[-1, -2:, :2, -2:].flatten().cpu() __snake_case : Optional[Any] = torch.tensor(__magic_name__ ) assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def lowercase__ ( self : Tuple , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.get_sd_vae_model(fpaa=__magic_name__ ) __snake_case : Optional[Any] = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) , fpaa=__magic_name__ ) with torch.no_grad(): __snake_case : List[str] = model.decode(__magic_name__ ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] __snake_case : str = sample[-1, -2:, :2, -2:].flatten().float().cpu() __snake_case : Optional[Any] = torch.tensor(__magic_name__ ) assert torch_all_close(__magic_name__ , __magic_name__ , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" ) def lowercase__ ( self : Any , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" __snake_case : List[str] = self.get_sd_vae_model(fpaa=__magic_name__ 
) __snake_case : Optional[int] = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) , fpaa=__magic_name__ ) with torch.no_grad(): __snake_case : Any = model.decode(__magic_name__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __snake_case : int = model.decode(__magic_name__ ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" ) def lowercase__ ( self : Dict , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = self.get_sd_vae_model() __snake_case : Dict = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) ) with torch.no_grad(): __snake_case : List[str] = model.decode(__magic_name__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __snake_case : List[Any] = model.decode(__magic_name__ ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : str ) -> Tuple: """simple docstring""" __snake_case : Union[str, Any] = self.get_sd_vae_model() __snake_case : Optional[int] = self.get_sd_image(__magic_name__ ) __snake_case : List[Any] = self.get_generator(__magic_name__ ) with torch.no_grad(): __snake_case : List[Any] = model.encode(__magic_name__ ).latent_dist __snake_case : Optional[Any] = dist.sample(generator=__magic_name__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __snake_case : int = sample[0, -1, -3:, -3:].flatten().cpu() __snake_case : Tuple = torch.tensor(__magic_name__ ) __snake_case : Any = 3E-3 if torch_device != """mps""" else 1E-2 assert torch_all_close(__magic_name__ , __magic_name__ , atol=__magic_name__ )
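For orientation, the encode/decode round trip these tests exercise reduces to a few calls. A minimal sketch, assuming the same CompVis/stable-diffusion-v1-4 VAE weights used above are reachable (the random tensor stands in for a real image batch):

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
image = torch.randn(1, 3, 512, 512)  # dummy image batch
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # (1, 4, 64, 64): 8x spatial downsampling
    reconstruction = vae.decode(latents).sample       # back to (1, 3, 512, 512)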
26
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]: pass @is_pipeline_test @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @require_torch def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(_a ), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], ], ) @require_tf def __lowerCAmelCase ( self ) -> Any: __SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], ], ) @slow @require_torch def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = 
image_classifier(_a, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(_a ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(_a ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, )
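The slow tests above boil down to the standard zero-shot call. A usage sketch with the same checkpoint and labels (the image path is illustrative):

from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "plane", "remote"],
)
print(predictions)  # e.g. [{'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, ...]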
693
0
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # token embeddings from the XLM-R encoder
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # masked mean pooling over the sequence dimension
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
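A shape-checking sketch with a randomly initialized model (real use would load pretrained M-CLIP weights; here transformerDimSize is set to XLM-R's default hidden size of 768 so the linear head lines up, and the sizes are otherwise illustrative):

import torch

config = MCLIPConfig(transformerDimSize=768, imageDimSize=640)  # illustrative sizes
model = MultilingualCLIP(config).eval()

input_ids = torch.tensor([[0, 1437, 2239, 2]])  # dummy token ids
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    text_embs, token_embs = model(input_ids, attention_mask)
print(text_embs.shape)   # torch.Size([1, 640])    -> numDims
print(token_embs.shape)  # torch.Size([1, 4, 768]) -> transformerDimensions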
27
from __future__ import annotations

import math


def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    """Return the optimal value for the player to move on a complete binary
    tree whose leaf values are given in `scores`; players alternate levels."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    # a leaf has been reached
    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
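With the scores used in main(), the tree has height log2(8) = 3 and the maximizer can secure 65. A worked trace:

# depth 2 (max): max(90,23)=90  max(6,33)=33  max(21,65)=65  max(123,34423)=34423
# depth 1 (min): min(90,33)=33  min(65,34423)=65
# root   (max): max(33,65)=65
import math
scores = [90, 23, 6, 33, 21, 65, 123, 34423]
print(minimax(0, 0, True, scores, math.log(len(scores), 2)))  # 65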
693
0
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square ('j' is folded into 'i')."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the one-based (row, column) coordinates of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at one-based (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")  # was a bare expression whose result was discarded

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
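A round-trip sketch (illustrative; the exact ciphertext depends on the square, so only invertibility is asserted here):

cipher = BifidCipher()
secret = cipher.encode("hello world")  # spaces are dropped; a 'j' would become 'i'
print(secret)
assert cipher.decode(secret) == "helloworld"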
28
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    # Turn every byte into its two-digit uppercase hex representation
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
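A quick round-trip example (hypothetical demo, not part of the original module):

assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"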
693
0
"""simple docstring""" import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging A_ = { """cola""": 2, """mnli""": 3, """mrpc""": 2, """sst-2""": 2, """sts-b""": 1, """qqp""": 2, """qnli""": 2, """rte""": 2, """wnli""": 2, } logging.set_verbosity_info() def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ): # Initialise PyTorch model lowerCamelCase_ = XLNetConfig.from_json_file(lowerCAmelCase__ ) lowerCamelCase_ = finetuning_task.lower() if finetuning_task is not None else '''''' if finetuning_task in GLUE_TASKS_NUM_LABELS: print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" ) lowerCamelCase_ = finetuning_task lowerCamelCase_ = GLUE_TASKS_NUM_LABELS[finetuning_task] lowerCamelCase_ = XLNetForSequenceClassification(lowerCAmelCase__ ) elif "squad" in finetuning_task: lowerCamelCase_ = finetuning_task lowerCamelCase_ = XLNetForQuestionAnswering(lowerCAmelCase__ ) else: lowerCamelCase_ = XLNetLMHeadModel(lowerCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) # Save pytorch-model lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) print(f"Save PyTorch model to {os.path.abspath(lowerCAmelCase__ )}" ) torch.save(model.state_dict() ,lowerCAmelCase__ ) print(f"Save configuration file to {os.path.abspath(lowerCAmelCase__ )}" ) with open(lowerCAmelCase__ ,'''w''' ,encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--xlnet_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained XLNet model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the folder to store the PyTorch model or dataset/vocab.""", ) parser.add_argument( """--finetuning_task""", default=None, type=str, help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""", ) A_ = parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
29
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check the equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Run the core process to find the problem solution."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of `n` consecutive integers
    that each have exactly `n` distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
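Worked examples for small n, matching the Project Euler 47 statement:

# n = 2: 14 = 2*7 and 15 = 3*5 are the first such pair              -> 14
# n = 3: 644 = 2^2*7*23, 645 = 3*5*43, 646 = 2*17*19 are the first  -> 644
print(solution(2))  # 14
print(solution(3))  # 644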
693
0
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __a = logging.get_logger(__name__) __a = { 'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __a( _a ): """simple docstring""" lowerCAmelCase = '''perceiver''' def __init__( self ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=1_280 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=26 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="kv" ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-12 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=262 ,_SCREAMING_SNAKE_CASE=2_048 ,_SCREAMING_SNAKE_CASE=56 ,_SCREAMING_SNAKE_CASE=[368, 496] ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=1_920 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=[1, 16, 224, 224] ,**_SCREAMING_SNAKE_CASE ,) -> int: super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = num_latents UpperCAmelCase_ : Any = d_latents UpperCAmelCase_ : List[str] = d_model UpperCAmelCase_ : int = num_blocks UpperCAmelCase_ : str = num_self_attends_per_block UpperCAmelCase_ : Any = num_self_attention_heads UpperCAmelCase_ : List[str] = num_cross_attention_heads UpperCAmelCase_ : List[Any] = qk_channels UpperCAmelCase_ : Union[str, Any] = v_channels UpperCAmelCase_ : Optional[Any] = cross_attention_shape_for_attention UpperCAmelCase_ : List[str] = self_attention_widening_factor UpperCAmelCase_ : Optional[Any] = cross_attention_widening_factor UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Union[str, Any] = layer_norm_eps UpperCAmelCase_ : int = use_query_residual # masked language modeling attributes UpperCAmelCase_ : Union[str, Any] = vocab_size UpperCAmelCase_ : Tuple = max_position_embeddings # image classification attributes UpperCAmelCase_ : Tuple = image_size # flow attributes UpperCAmelCase_ : int = train_size # multimodal autoencoding attributes UpperCAmelCase_ : int = num_frames UpperCAmelCase_ : Dict = audio_samples_per_frame UpperCAmelCase_ : Dict = samples_per_patch UpperCAmelCase_ : str = output_shape class __a( _a ): """simple docstring""" @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": UpperCAmelCase_ : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase_ : Tuple = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''inputs''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] ) @property def a__ ( self ) -> float: return 1e-4 def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 40 ,_SCREAMING_SNAKE_CASE = 40 ,) -> Mapping[str, Any]: # copied from 
`transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ : Dict = compute_effective_axis_dimension( _SCREAMING_SNAKE_CASE ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ : str = preprocessor.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = compute_effective_axis_dimension( _SCREAMING_SNAKE_CASE ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_SCREAMING_SNAKE_CASE ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ : Union[str, Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size UpperCAmelCase_ : Optional[int] = dict(preprocessor(_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase_ : List[str] = inputs.pop('''input_ids''' ) return inputs elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ : int = compute_effective_axis_dimension(_SCREAMING_SNAKE_CASE ,fixed_dimension=OnnxConfig.default_fixed_batch ) UpperCAmelCase_ : Optional[int] = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase_ : str = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
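Assuming this is transformers' PerceiverConfig (the 'perceiver' model type and the deepmind/language-perceiver checkpoint map point there), a smaller config can be built by overriding a few of the defaults visible in the signature; the values below are illustrative:

config = PerceiverConfig(num_latents=64, d_latents=128, d_model=96, vocab_size=262)
print(config.num_blocks)                  # 1 (default from the signature above)
print(config.num_self_attends_per_block)  # 26 (default from the signature above)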
30
import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _A ( __snake_case :Dict ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = VideoMAEConfig() set_architecture_configs(__snake_case , __snake_case ) if "finetuned" not in model_name: __SCREAMING_SNAKE_CASE = False if "finetuned" in model_name: __SCREAMING_SNAKE_CASE = "huggingface/label-files" if "kinetics" in model_name: __SCREAMING_SNAKE_CASE = 400 __SCREAMING_SNAKE_CASE = "kinetics400-id2label.json" elif "ssv2" in model_name: __SCREAMING_SNAKE_CASE = 174 __SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." ) __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) ) __SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]: """simple docstring""" if "small" in model_name: __SCREAMING_SNAKE_CASE = 384 __SCREAMING_SNAKE_CASE = 1536 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = 192 __SCREAMING_SNAKE_CASE = 768 elif "large" in model_name: __SCREAMING_SNAKE_CASE = 1024 __SCREAMING_SNAKE_CASE = 4096 __SCREAMING_SNAKE_CASE = 24 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 8 __SCREAMING_SNAKE_CASE = 512 __SCREAMING_SNAKE_CASE = 2048 elif "huge" in model_name: __SCREAMING_SNAKE_CASE = 1280 __SCREAMING_SNAKE_CASE = 5120 __SCREAMING_SNAKE_CASE = 32 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 8 __SCREAMING_SNAKE_CASE = 640 __SCREAMING_SNAKE_CASE = 2560 elif "base" not in model_name: raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" ) def _A ( __snake_case :List[Any] ) -> Optional[int]: """simple docstring""" if "encoder." in name: __SCREAMING_SNAKE_CASE = name.replace("encoder." 
, "" ) if "cls_token" in name: __SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" ) if "decoder_pos_embed" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" ) if "pos_embed" in name and "decoder" not in name: __SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" ) if "patch_embed.proj" in name: __SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" ) if "decoder.blocks" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" ) if "blocks" in name: __SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" ) if "attn.proj" in name: __SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name and "bias" not in name: __SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" ) if "attn" in name: __SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" ) if "norm1" in name: __SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" ) if "decoder_embed" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" ) if "decoder_norm" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" ) if "decoder_pred" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: __SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: __SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" ) if "head" in name and "decoder" not in name: __SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" ) return name def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case ) if key.startswith("encoder." ): __SCREAMING_SNAKE_CASE = key.replace("encoder." , "" ) if "qkv" in key: __SCREAMING_SNAKE_CASE = key.split("." ) if key.startswith("decoder.blocks" ): __SCREAMING_SNAKE_CASE = config.decoder_hidden_size __SCREAMING_SNAKE_CASE = int(key_split[2] ) __SCREAMING_SNAKE_CASE = "decoder.decoder_layers." if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = config.hidden_size __SCREAMING_SNAKE_CASE = int(key_split[1] ) __SCREAMING_SNAKE_CASE = "videomae.encoder.layer." 
if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = val return orig_state_dict def _A ( ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" ) __SCREAMING_SNAKE_CASE = np.load(__snake_case ) return list(__snake_case ) def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case ) if "finetuned" in model_name: __SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case ) else: __SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case ) # download original checkpoint, hosted on Google Drive __SCREAMING_SNAKE_CASE = "pytorch_model.bin" gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case ) __SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" ) if "model" in files: __SCREAMING_SNAKE_CASE = files["model"] else: __SCREAMING_SNAKE_CASE = files["module"] __SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case ) model.load_state_dict(__snake_case ) model.eval() # verify model on basic input __SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) __SCREAMING_SNAKE_CASE = prepare_video() __SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" ) if "finetuned" not in model_name: __SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" ) __SCREAMING_SNAKE_CASE = torch.load(__snake_case ) __SCREAMING_SNAKE_CASE = model(**__snake_case ) __SCREAMING_SNAKE_CASE = outputs.logits __SCREAMING_SNAKE_CASE = [ "videomae-small-finetuned-kinetics", "videomae-small-finetuned-ssv2", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) "videomae-base-short", "videomae-base-short-finetuned-kinetics", "videomae-base", "videomae-base-finetuned-kinetics", "videomae-large", "videomae-large-finetuned-kinetics", "videomae-huge-finetuned-kinetics", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) "videomae-base-short-ssv2", "videomae-base-short-finetuned-ssv2", "videomae-base-ssv2", "videomae-base-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] ) elif model_name == "videomae-small-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 174] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] ) elif model_name == "videomae-base": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] ) elif model_name == "videomae-base-short": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] ) # we verified the loss both for normalized and unnormalized targets for this one __SCREAMING_SNAKE_CASE = 
torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] ) elif model_name == "videomae-large": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] ) elif model_name == "videomae-large-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] ) elif model_name == "videomae-huge-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] ) elif model_name == "videomae-base-short-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] ) elif model_name == "videomae-base-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ) elif model_name == "videomae-base-short-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] ) elif model_name == "videomae-base-short-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 174] ) __SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] ) elif model_name == "videomae-base-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] ) elif model_name == "videomae-base-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 174] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] ) else: raise ValueError(f'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 ) else: print("Logits:" , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 ) print("Logits ok!" ) # verify loss, if applicable if model_name == "videomae-base-short": __SCREAMING_SNAKE_CASE = outputs.loss assert torch.allclose(__snake_case , __snake_case , atol=1e-4 ) print("Loss ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__snake_case ) model.save_pretrained(__snake_case ) if push_to_hub: print("Pushing to the hub..." ) model.push_to_hub(__snake_case , organization="nielsr" ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4', type=str, help=( 'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct' ' download link.'
), ) parser.add_argument( '--pytorch_dump_folder_path', default='/Users/nielsrogge/Documents/VideoMAE/Test', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) args = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
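For context, a minimal sketch of loading a checkpoint produced by the conversion above; the local path is illustrative, and a real Kinetics-400 clip would replace the random frames.

import numpy as np
from transformers import VideoMAEForVideoClassification, VideoMAEImageProcessor

ckpt = "./videomae-base-finetuned-kinetics"  # illustrative dump folder written by the script above
processor = VideoMAEImageProcessor.from_pretrained(ckpt)
model = VideoMAEForVideoClassification.from_pretrained(ckpt)

video = list(np.random.rand(16, 360, 640, 3))  # 16 dummy RGB frames
inputs = processor(video, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])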
693
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
31
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor

logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
693
0
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the ITU-R 601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean foreground mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
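A quick worked check of the dilation above: with the cross-shaped structuring element, a single foreground pixel grows into a plus sign.

import numpy as np

img = np.zeros((5, 5))
img[2, 2] = 1
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilation(img, cross).astype(int))
# [[0 0 0 0 0]
#  [0 0 1 0 0]
#  [0 1 1 1 0]
#  [0 0 1 0 0]
#  [0 0 0 0 0]]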
32
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum of the numbers below limit that form a two-cycle under sum_of_divisors (amicable numbers)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
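Sanity check with the classic amicable pair (220, 284), which is exactly the two-cycle the condition in solution detects; the full solution(10000) total is the well-known Project Euler 21 answer, 31626.

assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
print(solution(10000))  # 31626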
693
0
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
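For example, ANDing 25 (0b11001) with 37 (0b100101) keeps only the one bit position set in both operands, zero-padded to the longer operand's width:

print(binary_and(25, 37))  # 0b000001
print(binary_and(0, 0))    # 0b0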
33
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity (moles / volume) times the n-factor of the solute."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
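A worked ideal-gas example: 2 moles at 300 K in a 10 L vessel exert nRT/V = 2 * 0.0821 * 300 / 10 ≈ 4.93 atm, which the rounding helper reports as 5; inverting for temperature at 5 atm gives about 305 K.

print(moles_to_pressure(volume=10, moles=2, temperature=300))  # 5
print(pressure_and_volume_to_temperature(pressure=5, moles=2, volume=10))  # 305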
693
0
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class snake_case_ ( nn.Module ): """simple docstring""" def __init__( self) -> int: super().__init__() UpperCamelCase = nn.Linear(3 , 4) UpperCamelCase = nn.BatchNormad(4) UpperCamelCase = nn.Linear(4 , 5) def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple: return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_))) class snake_case_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self) -> str: UpperCamelCase = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCamelCase_ , model.state_dict()) UpperCamelCase = os.path.join(lowerCamelCase_ , '''index.json''') self.assertTrue(os.path.isfile(lowerCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: UpperCamelCase = os.path.join(lowerCamelCase_ , F'{key}.dat') self.assertTrue(os.path.isfile(lowerCamelCase_)) # TODO: add tests on the fact weights are properly loaded def UpperCAmelCase__ ( self) -> Tuple: UpperCamelCase = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: UpperCamelCase = torch.randn(2 , 3 , dtype=lowerCamelCase_) with TemporaryDirectory() as tmp_dir: UpperCamelCase = offload_weight(lowerCamelCase_ , '''weight''' , lowerCamelCase_ , {}) UpperCamelCase = os.path.join(lowerCamelCase_ , '''weight.dat''') self.assertTrue(os.path.isfile(lowerCamelCase_)) self.assertDictEqual(lowerCamelCase_ , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(lowerCamelCase_).split('''.''')[1]}}) UpperCamelCase = load_offloaded_weight(lowerCamelCase_ , index['''weight''']) self.assertTrue(torch.equal(lowerCamelCase_ , lowerCamelCase_)) def UpperCAmelCase__ ( self) -> str: UpperCamelCase = ModelForTest() UpperCamelCase = model.state_dict() UpperCamelCase = {k: v for k, v in state_dict.items() if '''linear2''' not in k} UpperCamelCase = {k: v for k, v in state_dict.items() if '''linear2''' in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase = OffloadedWeightsLoader(state_dict=lowerCamelCase_ , save_folder=lowerCamelCase_) # Every key is there with the right value self.assertEqual(sorted(lowerCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowerCamelCase_ , weight_map[key])) UpperCamelCase = {k: v for k, v in state_dict.items() if '''weight''' in k} UpperCamelCase = {k: v for k, v in state_dict.items() if '''weight''' not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase = OffloadedWeightsLoader(state_dict=lowerCamelCase_ , save_folder=lowerCamelCase_) # Every key is there with the right value self.assertEqual(sorted(lowerCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowerCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCamelCase_ , lowerCamelCase_) # Duplicates are removed UpperCamelCase = OffloadedWeightsLoader(state_dict=lowerCamelCase_ , save_folder=lowerCamelCase_) # Every key is there with the right value self.assertEqual(sorted(lowerCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): 
self.assertTrue(torch.allclose(lowerCamelCase_ , weight_map[key])) def UpperCAmelCase__ ( self) -> Any: UpperCamelCase = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2} UpperCamelCase = extract_submodules_state_dict(lowerCamelCase_ , ['''a.1''', '''a.2''']) self.assertDictEqual(lowerCamelCase_ , {'''a.1''': 0, '''a.2''': 2}) UpperCamelCase = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2} UpperCamelCase = extract_submodules_state_dict(lowerCamelCase_ , ['''a.1''', '''a.2''']) self.assertDictEqual(lowerCamelCase_ , {'''a.1.a''': 0, '''a.2.a''': 2})
34
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __SCREAMING_SNAKE_CASE : def __init__( self, _a, _a=99, _a=13, _a=7, _a=9, _a=True, _a=True, _a=False, _a=32, _a=5, _a=4, _a=37, _a=8, _a=0.1, _a=0.002, _a=1, _a=0, _a=0, _a=None, _a=None, ) -> Optional[int]: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = encoder_seq_length __SCREAMING_SNAKE_CASE = decoder_seq_length # For common tests __SCREAMING_SNAKE_CASE = self.decoder_seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_attention_mask __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = d_ff __SCREAMING_SNAKE_CASE = relative_attention_num_buckets __SCREAMING_SNAKE_CASE = dropout_rate __SCREAMING_SNAKE_CASE = initializer_factor __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = decoder_start_token_id __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = decoder_layers def __lowerCAmelCase ( self ) -> Optional[int]: return TaConfig.from_pretrained("google/umt5-base" ) def __lowerCAmelCase ( self, _a, _a, _a, _a=None, _a=None, _a=None, _a=None, _a=None, ) -> int: if attention_mask is None: __SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=_a ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=_a ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones( config.num_decoder_layers, config.num_attention_heads, device=_a ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 ) __SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 ) __SCREAMING_SNAKE_CASE = 
self.get_config() __SCREAMING_SNAKE_CASE = config.num_attention_heads __SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(_a, _a, _a ) return config, input_dict def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() return config, inputs_dict def __lowerCAmelCase ( self ) -> Optional[int]: return TaConfig( vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def __lowerCAmelCase ( self ) -> Union[str, Any]: return TaConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ) model.to(_a ) model.eval() __SCREAMING_SNAKE_CASE = model( input_ids=_a, decoder_input_ids=_a, attention_mask=_a, decoder_attention_mask=_a, ) __SCREAMING_SNAKE_CASE = model(input_ids=_a, decoder_input_ids=_a ) __SCREAMING_SNAKE_CASE = result.last_hidden_state __SCREAMING_SNAKE_CASE = result.past_key_values __SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(_a ), config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ), 4 ) def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Tuple: __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).get_decoder().to(_a ).eval() # first forward pass __SCREAMING_SNAKE_CASE = model(_a, use_cache=_a ) __SCREAMING_SNAKE_CASE = model(_a ) __SCREAMING_SNAKE_CASE = model(_a, use_cache=_a ) self.parent.assertTrue(len(_a ) == len(_a ) ) self.parent.assertTrue(len(_a ) == len(_a ) + 1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1), config.vocab_size ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 ) __SCREAMING_SNAKE_CASE = model(_a )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(_a, past_key_values=_a )["last_hidden_state"] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = 
output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) ) def __lowerCAmelCase ( self, _a, _a, ) -> Optional[int]: __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).to(_a ).half().eval() __SCREAMING_SNAKE_CASE = model(**_a )["last_hidden_state"] self.parent.assertFalse(torch.isnan(_a ).any().item() ) @require_torch class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE__ =( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ =(UMTaForConditionalGeneration,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ =( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =True # The small UMT5 model needs higher percentages for CPU/MP tests SCREAMING_SNAKE_CASE__ =[0.8, 0.9] def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = UMTaModelTester(self ) @unittest.skip("Test has a segmentation fault on torch 1.8.0" ) def __lowerCAmelCase ( self ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( _a, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=_a, opset_version=9, input_names=["input_ids", "decoder_input_ids"], ) @unittest.skipIf(torch_device == "cpu", "Cant do half precision" ) def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*_a ) def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = ["encoder_attentions", "decoder_attentions", "cross_attentions"] __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = config_and_inputs[0] __SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(_a ).eval() model.to(_a ) __SCREAMING_SNAKE_CASE = { "head_mask": torch.zeros(config.num_layers, config.num_heads, device=_a ), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ), } for attn_name, (name, mask) in zip(_a, head_masking.items() ): __SCREAMING_SNAKE_CASE = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __SCREAMING_SNAKE_CASE = torch.ones( config.num_decoder_layers, config.num_heads, device=_a ) __SCREAMING_SNAKE_CASE = model.generate( config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=_a, return_dict_in_generate=_a, **_a, ) # We check the state of decoder_attentions and cross_attentions just from the last step __SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 ) @unittest.skip("Does not 
work on the tiny model as we keep hitting edge cases." ) def __lowerCAmelCase ( self ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def __lowerCAmelCase ( self ) -> List[Any]: __SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a ) __SCREAMING_SNAKE_CASE = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] __SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids # fmt: off __SCREAMING_SNAKE_CASE = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(_a, _a ) __SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) ) __SCREAMING_SNAKE_CASE = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a ) self.assertEqual(_a, _a )
693
0
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
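For a silicon-like junction at 300 K with N_D = N_A = 1e17 cm^-3 and n_i = 1.5e10 cm^-3, the formula gives roughly 0.81 V:

print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))  # ~0.8125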
35
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    # model: BertModel PyTorch model instance to be converted
    # ckpt_dir: TensorFlow model directory
    # model_name: model name
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
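Illustrative invocation of the converter above; the paths are placeholders, and the flags match the argparse definition in the script.

# equivalent to:
#   python convert_bert_checkpoint.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_checkpoint
main([
    "--model_name", "bert-base-uncased",
    "--pytorch_model_path", "./pytorch_model.bin",
    "--tf_cache_dir", "./tf_checkpoint",
])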
693
0
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
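A minimal, hedged usage sketch: browsing a dataset repository through this legacy filesystem. The repository id and filename are illustrative, not taken from this snippet.

from huggingface_hub import HfApi

repo_info = HfApi().dataset_info("user/my-dataset")  # illustrative repo id
fs = HfFileSystem(repo_info=repo_info)
print(fs.ls(""))  # top-level files and directories
with fs.open("data.csv") as f:  # streamed via hf_hub_url under the hood
    print(f.readline())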
36
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _snake_case : str = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =["""input_values""", """padding_mask"""] def __init__( self, _a = 1, _a = 2_40_00, _a = 0.0, _a = None, _a = None, **_a, ) -> str: super().__init__(feature_size=_a, sampling_rate=_a, padding_value=_a, **_a ) __SCREAMING_SNAKE_CASE = chunk_length_s __SCREAMING_SNAKE_CASE = overlap @property def __lowerCAmelCase ( self ) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __lowerCAmelCase ( self ) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self, _a, _a = None, _a = False, _a = None, _a = None, _a = None, ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one." ) elif padding is None: # by default let's pad the inputs __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = bool( isinstance(_a, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(_a, dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_a, np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(_a, dtype=np.floataa ) elif isinstance(_a, np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_audio.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(_a ).T] # verify inputs are valid for idx, example in enumerate(_a ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = BatchFeature({"input_values": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: __SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: __SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length __SCREAMING_SNAKE_CASE = 
"max_length" else: __SCREAMING_SNAKE_CASE = input_values # normal padding on batch if padded_inputs is None: __SCREAMING_SNAKE_CASE = self.pad( _a, max_length=_a, truncation=_a, padding=_a, return_attention_mask=_a, ) if padding: __SCREAMING_SNAKE_CASE = padded_inputs.pop("attention_mask" ) __SCREAMING_SNAKE_CASE = [] for example in padded_inputs.pop("input_values" ): if self.feature_size == 1: __SCREAMING_SNAKE_CASE = example[..., None] input_values.append(example.T ) __SCREAMING_SNAKE_CASE = input_values if return_tensors is not None: __SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(_a ) return padded_inputs
693
0
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Return the smallest maximum cuboid dimension M such that the number of
    cuboids with an integer shortest surface path exceeds ``limit``
    (Project Euler 86).
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
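The Project Euler 86 statement provides a handy regression check: M = 99 admits 1,975 cuboids with integer shortest paths and M = 100 admits 2,060, so the first M whose count exceeds 1,974 is 99.

print(solution(1974))  # 99
print(solution(1975))  # 100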
37
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
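A minimal usage sketch; the checkpoint id is an example of a compatible SDE-VE model, not something taken from this snippet, and sampling at 2000 steps is slow on CPU.

from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
image = pipe(num_inference_steps=2000).images[0]
image.save("sde_ve_sample.png")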
693
0
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
38
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
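Small check: the even Fibonacci numbers not exceeding 100 are 2, 8 and 34, so the sum is 44.

print(solution(100))  # 44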
693
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = "deta" SCREAMING_SNAKE_CASE : Optional[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Optional[int] , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Optional[Any]=9_0_0 , _UpperCamelCase : Union[str, Any]=2_0_4_8 , _UpperCamelCase : Tuple=6 , _UpperCamelCase : List[str]=2_0_4_8 , _UpperCamelCase : Tuple=8 , _UpperCamelCase : Optional[int]=6 , _UpperCamelCase : int=1_0_2_4 , _UpperCamelCase : int=8 , _UpperCamelCase : str=0.0 , _UpperCamelCase : str=True , _UpperCamelCase : Tuple="relu" , _UpperCamelCase : int=2_5_6 , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : str=0.0 , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Any=0.02 , _UpperCamelCase : Dict=1.0 , _UpperCamelCase : Tuple=True , _UpperCamelCase : Dict=False , _UpperCamelCase : Any="sine" , _UpperCamelCase : Any=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Union[str, Any]=4 , _UpperCamelCase : Any=True , _UpperCamelCase : List[str]=3_0_0 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : int=True , _UpperCamelCase : Dict=1 , _UpperCamelCase : Any=5 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : Any=1 , _UpperCamelCase : Optional[int]=1 , _UpperCamelCase : Optional[int]=5 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : int=0.1 , _UpperCamelCase : List[Any]=0.25 , **_UpperCamelCase : str , ) ->Tuple: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) snake_case_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(_UpperCamelCase , _UpperCamelCase ): snake_case_ = backbone_config.pop('''model_type''' ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(_UpperCamelCase ) snake_case_ = backbone_config snake_case_ = num_queries snake_case_ = max_position_embeddings snake_case_ = d_model snake_case_ = encoder_ffn_dim snake_case_ = encoder_layers snake_case_ = encoder_attention_heads snake_case_ = decoder_ffn_dim snake_case_ = decoder_layers snake_case_ = decoder_attention_heads snake_case_ = dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = activation_function snake_case_ = init_std snake_case_ = init_xavier_std snake_case_ = encoder_layerdrop snake_case_ = auxiliary_loss snake_case_ = position_embedding_type # deformable attributes snake_case_ = num_feature_levels snake_case_ = encoder_n_points snake_case_ = decoder_n_points snake_case_ = two_stage snake_case_ = two_stage_num_proposals snake_case_ = with_box_refine snake_case_ = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher snake_case_ = class_cost snake_case_ = bbox_cost snake_case_ = giou_cost # Loss coefficients snake_case_ = mask_loss_coefficient snake_case_ = dice_loss_coefficient snake_case_ = bbox_loss_coefficient snake_case_ = giou_loss_coefficient snake_case_ = eos_coefficient snake_case_ = focal_alpha super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase ) @property def snake_case__( self : Tuple ) ->int: return self.encoder_attention_heads @property def snake_case__( self : Optional[Any] ) ->int: return self.d_model def snake_case__( self : Union[str, Any] ) ->Tuple: snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
39
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_element = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack = []
    result = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
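Tiny worked example: for each element, the first strictly greater value to its right (or -1 if none). The stack version does one reverse pass, O(n) amortized, versus the O(n^2) worst case of the two scanning variants.

print(next_greatest_element([2, 7, 3, 5, 4]))  # [7, -1, 5, -1, -1]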
693
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['''MBartTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['''MBartTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MBartForCausalLM''', '''MBartForConditionalGeneration''', '''MBartForQuestionAnswering''', '''MBartForSequenceClassification''', '''MBartModel''', '''MBartPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''TFMBartForConditionalGeneration''', '''TFMBartModel''', '''TFMBartPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''FlaxMBartForConditionalGeneration''', '''FlaxMBartForQuestionAnswering''', '''FlaxMBartForSequenceClassification''', '''FlaxMBartModel''', '''FlaxMBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
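The _import_structure / _LazyModule pattern above keeps `import transformers` cheap: the heavy torch/TF/Flax submodules are imported only when one of their attributes is first accessed. A stripped-down sketch of the idea (illustrative, not the actual `_LazyModule` implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # first access triggers the real (relative) import
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)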
40
from typing import Any


class __SCREAMING_SNAKE_CASE :
    def __init__( self, _a ) -> Any:
        __SCREAMING_SNAKE_CASE = data
        __SCREAMING_SNAKE_CASE = None

    def __repr__( self ) -> str:
        return f'''Node({self.data})'''


class __SCREAMING_SNAKE_CASE :
    def __init__( self ) -> Tuple:
        __SCREAMING_SNAKE_CASE = None

    def __iter__( self ) -> Any:
        __SCREAMING_SNAKE_CASE = self.head
        while node:
            yield node.data
            __SCREAMING_SNAKE_CASE = node.next

    def __len__( self ) -> int:
        return sum(1 for _ in self )

    def __repr__( self ) -> str:
        return "->".join([str(_a ) for item in self] )

    def __getitem__( self, _a ) -> Any:
        if not 0 <= index < len(self ):
            raise ValueError("list index out of range." )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None

    def __setitem__( self, _a, _a ) -> None:
        if not 0 <= index < len(self ):
            raise ValueError("list index out of range." )
        __SCREAMING_SNAKE_CASE = self.head
        for _ in range(_a ):
            __SCREAMING_SNAKE_CASE = current.next
        __SCREAMING_SNAKE_CASE = data

    def __lowerCAmelCase ( self, _a ) -> None:
        self.insert_nth(len(self ), _a )

    def __lowerCAmelCase ( self, _a ) -> None:
        self.insert_nth(0, _a )

    def __lowerCAmelCase ( self, _a, _a ) -> None:
        if not 0 <= index <= len(self ):
            raise IndexError("list index out of range" )
        __SCREAMING_SNAKE_CASE = Node(_a )
        if self.head is None:
            __SCREAMING_SNAKE_CASE = new_node
        elif index == 0:
            __SCREAMING_SNAKE_CASE = self.head  # link new_node to head
            __SCREAMING_SNAKE_CASE = new_node
        else:
            __SCREAMING_SNAKE_CASE = self.head
            for _ in range(index - 1 ):
                __SCREAMING_SNAKE_CASE = temp.next
            __SCREAMING_SNAKE_CASE = temp.next
            __SCREAMING_SNAKE_CASE = new_node

    def __lowerCAmelCase ( self ) -> None:  # print every node data
        print(self )

    def __lowerCAmelCase ( self ) -> Any:
        return self.delete_nth(0 )

    def __lowerCAmelCase ( self ) -> Any:  # delete from tail
        return self.delete_nth(len(self ) - 1 )

    def __lowerCAmelCase ( self, _a = 0 ) -> Any:
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError("List index out of range." )
        __SCREAMING_SNAKE_CASE = self.head  # default first node
        if index == 0:
            __SCREAMING_SNAKE_CASE = self.head.next
        else:
            __SCREAMING_SNAKE_CASE = self.head
            for _ in range(index - 1 ):
                __SCREAMING_SNAKE_CASE = temp.next
            __SCREAMING_SNAKE_CASE = temp.next
            __SCREAMING_SNAKE_CASE = temp.next.next
        return delete_node.data

    def __lowerCAmelCase ( self ) -> bool:
        return self.head is None

    def __lowerCAmelCase ( self ) -> None:
        __SCREAMING_SNAKE_CASE = None
        __SCREAMING_SNAKE_CASE = self.head

        while current:
            # Store the current node's next node.
            __SCREAMING_SNAKE_CASE = current.next
            # Make the current node's next point backwards
            __SCREAMING_SNAKE_CASE = prev
            # Make the previous node be the current node
            __SCREAMING_SNAKE_CASE = current
            # Make the current node the next node (to progress iteration)
            __SCREAMING_SNAKE_CASE = next_node
        # Return prev in order to put the head at the end
        __SCREAMING_SNAKE_CASE = prev


def _A ( ) -> None:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = LinkedList()
    assert linked_list.is_empty() is True
    assert str(__snake_case ) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10 ):
        assert len(__snake_case ) == i
        linked_list.insert_nth(__snake_case , i + 1 )

    assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) )

    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) )

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(__snake_case ) == 9
    assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) )

    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True

    for i in range(0 , 9 ):
        __SCREAMING_SNAKE_CASE = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True

    linked_list.reverse()
    assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) )


def _A ( ) -> None:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = [
        -9,
        100,
        Node(7734_5112 ),
        "dlrow olleH",
        7,
        5555,
        0,
        -1_9_2.5_5_5_5_5,
        "Hello, world!",
        7_7.9,
        Node(10 ),
        None,
        None,
        1_2.2_0,
    ]

    __SCREAMING_SNAKE_CASE = LinkedList()
    for i in test_input:
        linked_list.insert_tail(__snake_case )

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    __SCREAMING_SNAKE_CASE = linked_list.delete_head()
    assert result == -9
    assert (
        str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    __SCREAMING_SNAKE_CASE = linked_list.delete_tail()
    assert result == 1_2.2
    assert (
        str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    __SCREAMING_SNAKE_CASE = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!" ) )
    assert (
        str(__snake_case )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(__snake_case )
    assert (
        str(__snake_case )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(__snake_case )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def _A ( ) -> Union[str, Any]:
    """simple docstring"""
    from doctest import testmod

    testmod()
    __SCREAMING_SNAKE_CASE = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head " ).strip() )
    linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
    linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nDelete head" )
    linked_list.delete_head()
    print("Delete tail" )
    linked_list.delete_tail()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nReverse linked list" )
    linked_list.reverse()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nString representation of linked list:" )
    print(__snake_case )
    print("\nReading/changing Node data using indexing:" )
    print(f'''Element at Position 1: {linked_list[1]}''' )
    __SCREAMING_SNAKE_CASE = input("Enter New Value: " ).strip()
    print("New list:" )
    print(__snake_case )
    print(f'''length of linked_list is : {len(__snake_case )}''' )


if __name__ == "__main__":
    main()
693
0
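A hypothetical usage sketch for the singly linked list above; the method names (insert_tail, reverse) and the "->" string form follow the behaviour exercised by the sample's own test functions, while the class name LinkedList is an assumption recovered from those tests:

lst = LinkedList()
for value in (1, 2, 3):
    lst.insert_tail(value)      # append at the end
assert str(lst) == "1->2->3"
lst.reverse()                   # in-place reversal of the next pointers
assert str(lst) == "3->2->1"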
'''simple docstring'''
from importlib import import_module

from .logging import get_logger


lowerCAmelCase__ = get_logger(__name__)


class lowercase_ :
    """simple docstring"""

    def __init__( self : Union[str, Any] ,lowercase__ : Dict ,lowercase__ : Union[str, Any]=None ):
        __lowercase = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('''__''' ):
                    setattr(self ,lowercase__ ,getattr(lowercase__ ,lowercase__ ) )
        __lowercase = module._original_module if isinstance(lowercase__ ,_PatchedModuleObj ) else module


class lowercase_ :
    """simple docstring"""

    SCREAMING_SNAKE_CASE : str = []

    def __init__( self : Tuple ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Any=None ):
        __lowercase = obj
        __lowercase = target
        __lowercase = new
        __lowercase = target.split('''.''' )[0]
        __lowercase = {}
        __lowercase = attrs or []

    def __enter__( self : Union[str, Any] ):
        *__lowercase , __lowercase = self.target.split('''.''' )

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(lowercase__ ) ):
            try:
                __lowercase = import_module('''.'''.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                __lowercase = getattr(self.obj ,lowercase__ )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows patching renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(lowercase__ ,_PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    __lowercase = obj_attr
                    # patch at top level
                    setattr(self.obj ,lowercase__ ,_PatchedModuleObj(lowercase__ ,attrs=self.attrs ) )
                    __lowercase = getattr(self.obj ,lowercase__ )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(lowercase__ ,lowercase__ ,_PatchedModuleObj(getattr(lowercase__ ,lowercase__ ,lowercase__ ) ,attrs=self.attrs ) )
                        __lowercase = getattr(lowercase__ ,lowercase__ )
                    # finally set the target attribute
                    setattr(lowercase__ ,lowercase__ ,self.new )

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                __lowercase = getattr(import_module('''.'''.join(lowercase__ ) ) ,lowercase__ )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows patching renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj ,lowercase__ ) is attr_value:
                    __lowercase = getattr(self.obj ,lowercase__ )
                    setattr(self.obj ,lowercase__ ,self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            __lowercase = globals()['''__builtins__'''][target_attr]
            setattr(self.obj ,lowercase__ ,self.new )
        else:
            raise RuntimeError(F"Tried to patch attribute {target_attr} instead of a submodule." )

    def __exit__( self : Tuple ,*lowercase__ : Any ):
        for attr in list(self.original ):
            setattr(self.obj ,lowercase__ ,self.original.pop(lowercase__ ) )

    def SCREAMING_SNAKE_CASE ( self : int ):
        self.__enter__()
        self._active_patches.append(self )

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
41
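The patcher class above appears to be the `patch_submodule` helper from `datasets.utils.patching` with its identifiers obfuscated. A hedged usage sketch under that assumption; `my_module`, `fake_join`, and `do_something_with_paths` are purely illustrative names:

from datasets.utils.patching import patch_submodule

import my_module  # hypothetical module that did `import os` and calls os.path.join


def fake_join(*parts):
    return "/".join(parts)


# inside the block, my_module sees fake_join wherever it reaches os.path.join
with patch_submodule(my_module, "os.path.join", fake_join):
    my_module.do_something_with_paths()  # hypothetical call using the patched join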
import argparse
import json

from tqdm import tqdm


def _A ( ) -> Optional[int]:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path" , type=__snake_case , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
    parser.add_argument(
        "--evaluation_set" , type=__snake_case , help="where to store parsed evaluation_set file" , )
    parser.add_argument(
        "--gold_data_path" , type=__snake_case , help="where to store parsed gold_data_path file" , )
    __SCREAMING_SNAKE_CASE = parser.parse_args()

    with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
        args.gold_data_path , "w" ) as gold_file:
        __SCREAMING_SNAKE_CASE = json.load(__snake_case )
        for dpr_record in tqdm(__snake_case ):
            __SCREAMING_SNAKE_CASE = dpr_record["question"]
            __SCREAMING_SNAKE_CASE = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n" )
            gold_file.write("\t".join(__snake_case ) + "\n" )


if __name__ == "__main__":
    main()
693
0
'''simple docstring'''


def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
    if not (isinstance(__UpperCamelCase ,__UpperCamelCase ) and isinstance(__UpperCamelCase ,__UpperCamelCase )):
        raise ValueError('longest_common_substring() takes two strings for inputs' )

    lowerCamelCase_ = len(__UpperCamelCase )
    lowerCamelCase_ = len(__UpperCamelCase )

    lowerCamelCase_ = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
    lowerCamelCase_ = 0
    lowerCamelCase_ = 0

    for i in range(1 ,texta_length + 1 ):
        for j in range(1 ,texta_length + 1 ):
            if texta[i - 1] == texta[j - 1]:
                lowerCamelCase_ = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    lowerCamelCase_ = i
                    lowerCamelCase_ = dp[i][j]

    return texta[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
42
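A quick worked example for the DP above. The public name longest_common_substring is taken from the function's own ValueError message, and dp[i][j] holds the length of the longest common suffix of the two prefixes of length i and j:

# "bcd" is the longest run of characters shared consecutively by both inputs
assert longest_common_substring("abcdef", "xbcdz") == "bcd"
assert longest_common_substring("abc", "xyz") == ""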
def _A ( __snake_case :int = 10**9 ) -> int:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = 1
    __SCREAMING_SNAKE_CASE = 2
    __SCREAMING_SNAKE_CASE = 0
    __SCREAMING_SNAKE_CASE = 0
    __SCREAMING_SNAKE_CASE = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        __SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(F"""{solution() = }""")
693
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import VivitImageProcessor


class _a ( unittest.TestCase ):
    def __init__(
        self: List[Any] ,
        UpperCamelCase_: Optional[int] ,
        UpperCamelCase_: Tuple=7 ,
        UpperCamelCase_: Dict=3 ,
        UpperCamelCase_: Any=10 ,
        UpperCamelCase_: Optional[int]=18 ,
        UpperCamelCase_: List[Any]=30 ,
        UpperCamelCase_: Tuple=400 ,
        UpperCamelCase_: Optional[Any]=True ,
        UpperCamelCase_: Any=None ,
        UpperCamelCase_: Tuple=True ,
        UpperCamelCase_: str=[0.5, 0.5, 0.5] ,
        UpperCamelCase_: List[Any]=[0.5, 0.5, 0.5] ,
        UpperCamelCase_: Dict=None ,
    ) -> Any:
        """simple docstring"""
        lowercase__ = size if size is not None else {'''shortest_edge''': 18}
        lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}

        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = num_channels
        lowercase__ = num_frames
        lowercase__ = image_size
        lowercase__ = min_resolution
        lowercase__ = max_resolution
        lowercase__ = do_resize
        lowercase__ = size
        lowercase__ = do_normalize
        lowercase__ = image_mean
        lowercase__ = image_std
        lowercase__ = crop_size

    def lowerCamelCase_ ( self: int ) -> List[str]:
        """simple docstring"""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class _a ( UpperCamelCase__ , unittest.TestCase ):
    _lowercase : Tuple = VivitImageProcessor if is_vision_available() else None

    def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
        """simple docstring"""
        lowercase__ = VivitImageProcessingTester(self )

    @property
    def lowerCamelCase_ ( self: Dict ) -> Tuple:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''do_center_crop''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )

    def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
        """simple docstring"""
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )

        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def lowerCamelCase_ ( self: Any ) -> Optional[Any]:
        """simple docstring"""
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        lowercase__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
        for video in video_inputs:
            self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
            self.assertIsInstance(video[0] , Image.Image )

        # Test not batched input
        lowercase__ = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

        # Test batched
        lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

    def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
        """simple docstring"""
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
        for video in video_inputs:
            self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
            self.assertIsInstance(video[0] , np.ndarray )

        # Test not batched input
        lowercase__ = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

        # Test batched
        lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

    def lowerCamelCase_ ( self: List[str] ) -> int:
        """simple docstring"""
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
        for video in video_inputs:
            self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
            self.assertIsInstance(video[0] , torch.Tensor )

        # Test not batched input
        lowercase__ = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

        # Test batched
        lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )
43
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    from .features import FeatureType


_snake_case , _snake_case , _snake_case : List[Any] = False, False, False


@dataclass
class __SCREAMING_SNAKE_CASE :
    SCREAMING_SNAKE_CASE__ =None
    SCREAMING_SNAKE_CASE__ =True
    SCREAMING_SNAKE_CASE__ =True
    SCREAMING_SNAKE_CASE__ =None
    # Automatically constructed
    SCREAMING_SNAKE_CASE__ ="dict"
    SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
    SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )

    def __call__( self ) -> Optional[int]:
        return self.pa_type

    def __lowerCAmelCase ( self, _a ) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
        if isinstance(_a, _a ):
            return {"bytes": None, "path": value}
        elif isinstance(_a, _a ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            __SCREAMING_SNAKE_CASE = BytesIO()
            sf.write(_a, value["array"], value["sampling_rate"], format="wav" )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm" ):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate" ) is None:
                    # If you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
                if value.get("bytes" ):
                    # If we already had PCM bytes, we don't have to do "read file, make bytes" (just use them!)
                    __SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67
                else:
                    __SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67

                __SCREAMING_SNAKE_CASE = BytesIO(bytes() )
                sf.write(_a, _a, value["sampling_rate"], format="wav" )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )

    def __lowerCAmelCase ( self, _a, _a = None ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )

        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err

        __SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )

        if file is None:
            __SCREAMING_SNAKE_CASE = token_per_repo_id or {}
            __SCREAMING_SNAKE_CASE = path.split("::" )[-1]
            try:
                __SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"]
                __SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                __SCREAMING_SNAKE_CASE = None

            with xopen(_a, "rb", use_auth_token=_a ) as f:
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
        else:
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )

        __SCREAMING_SNAKE_CASE = array.T
        if self.mono:
            __SCREAMING_SNAKE_CASE = librosa.to_mono(_a )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            __SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate )
            __SCREAMING_SNAKE_CASE = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature." )
        return {
            "bytes": Value("binary" ),
            "path": Value("string" ),
        }

    def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
        if pa.types.is_string(storage.type ):
            __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
            __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
            __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
            __SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                __SCREAMING_SNAKE_CASE = storage.field("bytes" )
            else:
                __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                __SCREAMING_SNAKE_CASE = storage.field("path" )
            else:
                __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
            __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
        return array_cast(_a, self.pa_type )

    def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(_a ):
            with xopen(_a, "rb" ) as f:
                __SCREAMING_SNAKE_CASE = f.read()
            return bytes_

        __SCREAMING_SNAKE_CASE = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        __SCREAMING_SNAKE_CASE = pa.array(
            [os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
        __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
        return array_cast(_a, self.pa_type )
693
0
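For context, the feature class above is normally reached through the public `datasets` API rather than instantiated by hand; a short usage sketch (the file path is illustrative):

from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["clip.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]  # decoded on access: {"path": ..., "array": np.ndarray, "sampling_rate": 16000}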
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler

from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
    Adafactor,
    AdamW,
    get_constant_schedule,
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available


if is_fairscale_available():
    from fairscale.optim import OSS


UpperCAmelCase_ : List[str] = logging.get_logger(__name__)

UpperCAmelCase_ : Union[str, Any] = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    'constant': get_constant_schedule,
    'constant_w_warmup': get_constant_schedule_with_warmup,
}


class UpperCAmelCase__ ( A ):
    def __init__( self : Tuple,__A : Any=None,__A : Dict=None,*__A : Tuple,**__A : List[Any] ):
        super().__init__(*__A,**__A )

        if config is None:
            assert isinstance(self.model,__A ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f' {self.model.__class__}'
            )
            _lowerCamelCase : List[str] = self.model.config
        else:
            _lowerCamelCase : Any = config

        _lowerCamelCase : Union[str, Any] = data_args
        _lowerCamelCase : int = self.config.tgt_vocab_size if isinstance(self.config,__A ) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
                " padding.." )

        if self.args.label_smoothing == 0:
            _lowerCamelCase : Any = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            _lowerCamelCase : int = label_smoothed_nll_loss

    def lowerCamelCase_ ( self : Optional[int],__A : int ):
        if self.optimizer is None:
            _lowerCamelCase : Any = ["bias", "LayerNorm.weight"]
            _lowerCamelCase : str = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    "weight_decay": 0.0,
                },
            ]
            _lowerCamelCase : Union[str, Any] = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                _lowerCamelCase : Any = Adafactor
                _lowerCamelCase : Any = {"scale_parameter": False, "relative_step": False}
            else:
                _lowerCamelCase : int = AdamW
                _lowerCamelCase : List[str] = {
                    "betas": (self.args.adam_betaa, self.args.adam_betaa),
                    "eps": self.args.adam_epsilon,
                }
            _lowerCamelCase : Union[str, Any] = self.args.learning_rate
            if self.sharded_ddp:
                _lowerCamelCase : List[str] = OSS(
                    params=__A,optim=__A,**__A,)
            else:
                _lowerCamelCase : str = optimizer_cls(__A,**__A )

        if self.lr_scheduler is None:
            _lowerCamelCase : List[str] = self._get_lr_scheduler(__A )
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )

    def lowerCamelCase_ ( self : Optional[int],__A : List[Any] ):
        _lowerCamelCase : Optional[Any] = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            _lowerCamelCase : int = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            _lowerCamelCase : Optional[int] = schedule_func(self.optimizer,num_warmup_steps=self.args.warmup_steps )
        else:
            _lowerCamelCase : Tuple = schedule_func(
                self.optimizer,num_warmup_steps=self.args.warmup_steps,num_training_steps=__A )
        return scheduler

    def lowerCamelCase_ ( self : Union[str, Any] ):
        if isinstance(self.train_dataset,torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),)

            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )

    def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : Any ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                _lowerCamelCase : Optional[Any] = model(**__A,use_cache=__A )[0]
                _lowerCamelCase : Optional[int] = self.loss_fn(logits.view(-1,logits.shape[-1] ),labels.view(-1 ) )
            else:
                # compute usual loss via models
                _lowerCamelCase , _lowerCamelCase : str = model(**__A,labels=__A,use_cache=__A )[:2]
        else:
            # compute label smoothed loss
            _lowerCamelCase : int = model(**__A,use_cache=__A )[0]
            _lowerCamelCase : str = torch.nn.functional.log_softmax(__A,dim=-1 )
            _lowerCamelCase , _lowerCamelCase : Optional[int] = self.loss_fn(__A,__A,self.args.label_smoothing,ignore_index=self.config.pad_token_id )
        return loss, logits

    def lowerCamelCase_ ( self : List[str],__A : int,__A : str ):
        _lowerCamelCase : List[Any] = inputs.pop("labels" )
        _lowerCamelCase , _lowerCamelCase : int = self._compute_loss(__A,__A,__A )
        return loss

    def lowerCamelCase_ ( self : Union[str, Any],__A : nn.Module,__A : Dict[str, Union[torch.Tensor, Any]],__A : bool,__A : Optional[List[str]] = None,):
        _lowerCamelCase : List[str] = self._prepare_inputs(__A )

        _lowerCamelCase : Dict = {
            "max_length": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            _lowerCamelCase : Dict = self.model.generate(
                inputs["input_ids"],attention_mask=inputs["attention_mask"],**__A,)
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                _lowerCamelCase : Dict = self._pad_tensors_to_max_len(__A,gen_kwargs["max_length"] )

        _lowerCamelCase : Optional[Any] = inputs.pop("labels" )
        with torch.no_grad():
            # compute loss on predict data
            _lowerCamelCase , _lowerCamelCase : List[Any] = self._compute_loss(__A,__A,__A )

        _lowerCamelCase : List[Any] = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        _lowerCamelCase : List[Any] = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            _lowerCamelCase : Any = self._pad_tensors_to_max_len(__A,gen_kwargs["max_length"] )

        return (loss, logits, labels)

    def lowerCamelCase_ ( self : int,__A : Dict,__A : int ):
        # If PAD token is not defined at least EOS token has to be defined
        _lowerCamelCase : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f' padded to `max_length`={max_length}' )

        _lowerCamelCase : Union[str, Any] = pad_token_id * torch.ones(
            (tensor.shape[0], max_length),dtype=tensor.dtype,device=tensor.device )
        _lowerCamelCase : Dict = tensor
        return padded_tensor
44
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    SCREAMING_SNAKE_CASE__ =(IPNDMScheduler,)
    SCREAMING_SNAKE_CASE__ =(("""num_inference_steps""", 50),)

    def __lowerCAmelCase ( self, **_a ) -> str:
        __SCREAMING_SNAKE_CASE = {"num_train_timesteps": 10_00}
        config.update(**_a )
        return config

    def __lowerCAmelCase ( self, _a=0, **_a ) -> List[Any]:
        __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
        __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
        __SCREAMING_SNAKE_CASE = self.dummy_sample
        __SCREAMING_SNAKE_CASE = 0.1 * sample
        __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            __SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
            __SCREAMING_SNAKE_CASE = scheduler_class(**_a )
            scheduler.set_timesteps(_a )
            # copy over dummy past residuals
            __SCREAMING_SNAKE_CASE = dummy_past_residuals[:]

            if time_step is None:
                __SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(_a )
                __SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
                new_scheduler.set_timesteps(_a )
                # copy over dummy past residuals
                __SCREAMING_SNAKE_CASE = dummy_past_residuals[:]

            __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
            __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

            __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
            __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def __lowerCAmelCase ( self ) -> str:
        pass

    def __lowerCAmelCase ( self, _a=0, **_a ) -> int:
        __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
        __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
        __SCREAMING_SNAKE_CASE = self.dummy_sample
        __SCREAMING_SNAKE_CASE = 0.1 * sample
        __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            __SCREAMING_SNAKE_CASE = self.get_scheduler_config()
            __SCREAMING_SNAKE_CASE = scheduler_class(**_a )
            scheduler.set_timesteps(_a )
            # copy over dummy past residuals (must be after setting timesteps)
            __SCREAMING_SNAKE_CASE = dummy_past_residuals[:]

            if time_step is None:
                __SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(_a )
                __SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(_a )
                # copy over dummy past residual (must be after setting timesteps)
                __SCREAMING_SNAKE_CASE = dummy_past_residuals[:]

            __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
            __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

            __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
            __SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def __lowerCAmelCase ( self, **_a ) -> Tuple:
        __SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
        __SCREAMING_SNAKE_CASE = scheduler_class(**_a )

        __SCREAMING_SNAKE_CASE = 10
        __SCREAMING_SNAKE_CASE = self.dummy_model()
        __SCREAMING_SNAKE_CASE = self.dummy_sample_deter
        scheduler.set_timesteps(_a )

        for i, t in enumerate(scheduler.timesteps ):
            __SCREAMING_SNAKE_CASE = model(_a, _a )
            __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample

        for i, t in enumerate(scheduler.timesteps ):
            __SCREAMING_SNAKE_CASE = model(_a, _a )
            __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample

        return sample

    def __lowerCAmelCase ( self ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
        __SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )

        for scheduler_class in self.scheduler_classes:
            __SCREAMING_SNAKE_CASE = self.get_scheduler_config()
            __SCREAMING_SNAKE_CASE = scheduler_class(**_a )

            __SCREAMING_SNAKE_CASE = self.dummy_sample
            __SCREAMING_SNAKE_CASE = 0.1 * sample

            if num_inference_steps is not None and hasattr(_a, "set_timesteps" ):
                scheduler.set_timesteps(_a )
            elif num_inference_steps is not None and not hasattr(_a, "set_timesteps" ):
                __SCREAMING_SNAKE_CASE = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            __SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            __SCREAMING_SNAKE_CASE = dummy_past_residuals[:]

            __SCREAMING_SNAKE_CASE = scheduler.timesteps[5]
            __SCREAMING_SNAKE_CASE = scheduler.timesteps[6]

            __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
            __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample

            self.assertEqual(output_a.shape, sample.shape )
            self.assertEqual(output_a.shape, output_a.shape )

            __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
            __SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample

            self.assertEqual(output_a.shape, sample.shape )
            self.assertEqual(output_a.shape, output_a.shape )

    def __lowerCAmelCase ( self ) -> str:
        for timesteps in [1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=_a, time_step=_a )

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 1_00] ):
            self.check_over_forward(num_inference_steps=_a, time_step=_a )

    def __lowerCAmelCase ( self ) -> Any:
        __SCREAMING_SNAKE_CASE = self.full_loop()
        __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) )

        assert abs(result_mean.item() - 2_54_05_29 ) < 10
693
0
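A minimal sketch of the save/load round-trip these scheduler tests exercise, assuming the public diffusers API (the concrete scheduler and config value are illustrative):

import tempfile

from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)                      # writes scheduler_config.json
    restored = IPNDMScheduler.from_pretrained(tmpdirname)  # must step identically afterwards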
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class lowerCAmelCase_ ( lowercase ):
    """simple docstring"""

    _snake_case : int = (PNDMScheduler,)
    _snake_case : Any = (("""num_inference_steps""", 50),)

    def __a ( self :str , **lowerCamelCase__ :int ):
        UpperCamelCase__ :Any = {
            """num_train_timesteps""": 10_00,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**lowerCamelCase__ )
        return config

    def __a ( self :Dict , lowerCamelCase__ :List[Any]=0 , **lowerCamelCase__ :List[str] ):
        UpperCamelCase__ :Optional[int] = dict(self.forward_default_kwargs )
        UpperCamelCase__ :int = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )
        UpperCamelCase__ :List[Any] = self.dummy_sample
        UpperCamelCase__ :Tuple = 0.1 * sample
        UpperCamelCase__ :List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            UpperCamelCase__ :str = self.get_scheduler_config(**lowerCamelCase__ )
            UpperCamelCase__ :Tuple = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # copy over dummy past residuals
            UpperCamelCase__ :str = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCamelCase__ )
                UpperCamelCase__ :int = scheduler_class.from_pretrained(lowerCamelCase__ )
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residuals
                UpperCamelCase__ :List[str] = dummy_past_residuals[:]

            UpperCamelCase__ :Dict = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
            UpperCamelCase__ :List[str] = new_scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

            UpperCamelCase__ :Optional[Any] = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
            UpperCamelCase__ :Optional[int] = new_scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __a ( self :List[Any] ):
        pass

    def __a ( self :Optional[int] , lowerCamelCase__ :Tuple=0 , **lowerCamelCase__ :Tuple ):
        UpperCamelCase__ :List[Any] = dict(self.forward_default_kwargs )
        UpperCamelCase__ :List[str] = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )
        UpperCamelCase__ :List[str] = self.dummy_sample
        UpperCamelCase__ :Union[str, Any] = 0.1 * sample
        UpperCamelCase__ :Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            UpperCamelCase__ :Optional[Any] = self.get_scheduler_config()
            UpperCamelCase__ :Optional[Any] = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # copy over dummy past residuals (must be after setting timesteps)
            UpperCamelCase__ :int = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCamelCase__ )
                UpperCamelCase__ :List[Any] = scheduler_class.from_pretrained(lowerCamelCase__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residual (must be after setting timesteps)
                UpperCamelCase__ :Optional[int] = dummy_past_residuals[:]

            UpperCamelCase__ :Any = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
            UpperCamelCase__ :Tuple = new_scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

            UpperCamelCase__ :Any = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
            UpperCamelCase__ :Union[str, Any] = new_scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __a ( self :Dict , **lowerCamelCase__ :Union[str, Any] ):
        UpperCamelCase__ :Optional[Any] = self.scheduler_classes[0]
        UpperCamelCase__ :int = self.get_scheduler_config(**lowerCamelCase__ )
        UpperCamelCase__ :str = scheduler_class(**lowerCamelCase__ )

        UpperCamelCase__ :int = 10
        UpperCamelCase__ :Any = self.dummy_model()
        UpperCamelCase__ :int = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCamelCase__ )

        for i, t in enumerate(scheduler.prk_timesteps ):
            UpperCamelCase__ :str = model(lowerCamelCase__ , lowerCamelCase__ )
            UpperCamelCase__ :Tuple = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps ):
            UpperCamelCase__ :Dict = model(lowerCamelCase__ , lowerCamelCase__ )
            UpperCamelCase__ :List[str] = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample

        return sample

    def __a ( self :List[Any] ):
        UpperCamelCase__ :List[str] = dict(self.forward_default_kwargs )
        UpperCamelCase__ :int = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )

        for scheduler_class in self.scheduler_classes:
            UpperCamelCase__ :Union[str, Any] = self.get_scheduler_config()
            UpperCamelCase__ :int = scheduler_class(**lowerCamelCase__ )

            UpperCamelCase__ :Dict = self.dummy_sample
            UpperCamelCase__ :Optional[Any] = 0.1 * sample

            if num_inference_steps is not None and hasattr(lowerCamelCase__ , """set_timesteps""" ):
                scheduler.set_timesteps(lowerCamelCase__ )
            elif num_inference_steps is not None and not hasattr(lowerCamelCase__ , """set_timesteps""" ):
                UpperCamelCase__ :Dict = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            UpperCamelCase__ :Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            UpperCamelCase__ :Tuple = dummy_past_residuals[:]

            UpperCamelCase__ :int = scheduler.step_prk(lowerCamelCase__ , 0 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
            UpperCamelCase__ :Dict = scheduler.step_prk(lowerCamelCase__ , 1 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample

            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

            UpperCamelCase__ :int = scheduler.step_plms(lowerCamelCase__ , 0 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
            UpperCamelCase__ :Union[str, Any] = scheduler.step_plms(lowerCamelCase__ , 1 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample

            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def __a ( self :Any ):
        for timesteps in [1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=lowerCamelCase__ )

    def __a ( self :str ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=lowerCamelCase__ )

        UpperCamelCase__ :Tuple = self.scheduler_classes[0]
        UpperCamelCase__ :Tuple = self.get_scheduler_config(steps_offset=1 )
        UpperCamelCase__ :Optional[int] = scheduler_class(**lowerCamelCase__ )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps ,
            torch.LongTensor(
                [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )

    def __a ( self :List[str] ):
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ )

    def __a ( self :Tuple ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowerCamelCase__ )

    def __a ( self :List[Any] ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCamelCase__ )

    def __a ( self :Any ):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=lowerCamelCase__ )

    def __a ( self :List[Any] ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
            self.check_over_forward(num_inference_steps=lowerCamelCase__ )

    def __a ( self :Dict ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        UpperCamelCase__ :int = 27

        for scheduler_class in self.scheduler_classes:
            UpperCamelCase__ :Tuple = self.dummy_sample
            UpperCamelCase__ :List[str] = 0.1 * sample

            UpperCamelCase__ :List[Any] = self.get_scheduler_config()
            UpperCamelCase__ :Any = scheduler_class(**lowerCamelCase__ )

            scheduler.set_timesteps(lowerCamelCase__ )

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                UpperCamelCase__ :Union[str, Any] = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample

    def __a ( self :str ):
        with self.assertRaises(lowerCamelCase__ ):
            UpperCamelCase__ :Optional[Any] = self.scheduler_classes[0]
            UpperCamelCase__ :Union[str, Any] = self.get_scheduler_config()
            UpperCamelCase__ :str = scheduler_class(**lowerCamelCase__ )

            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample

    def __a ( self :Optional[int] ):
        UpperCamelCase__ :Tuple = self.full_loop()
        UpperCamelCase__ :str = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCamelCase__ :Tuple = torch.mean(torch.abs(lowerCamelCase__ ) )

        assert abs(result_sum.item() - 198.1318 ) < 1e-2
        assert abs(result_mean.item() - 0.2580 ) < 1e-3

    def __a ( self :Dict ):
        UpperCamelCase__ :str = self.full_loop(prediction_type="""v_prediction""" )
        UpperCamelCase__ :Tuple = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCamelCase__ :str = torch.mean(torch.abs(lowerCamelCase__ ) )

        assert abs(result_sum.item() - 67.3986 ) < 1e-2
        assert abs(result_mean.item() - 0.0878 ) < 1e-3

    def __a ( self :Union[str, Any] ):
        # We specify different beta, so that the first alpha is 0.99
        UpperCamelCase__ :Dict = self.full_loop(set_alpha_to_one=lowerCamelCase__ , beta_start=0.01 )
        UpperCamelCase__ :Union[str, Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCamelCase__ :Tuple = torch.mean(torch.abs(lowerCamelCase__ ) )

        assert abs(result_sum.item() - 230.0399 ) < 1e-2
        assert abs(result_mean.item() - 0.2995 ) < 1e-3

    def __a ( self :Dict ):
        # We specify different beta, so that the first alpha is 0.99
        UpperCamelCase__ :Union[str, Any] = self.full_loop(set_alpha_to_one=lowerCamelCase__ , beta_start=0.01 )
        UpperCamelCase__ :List[Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
        UpperCamelCase__ :Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )

        assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
45
import random

from .binary_exp_mod import bin_exp_mod


def _A ( __snake_case :List[Any] , __snake_case :Union[str, Any]=1000 ) -> int:
    """simple docstring"""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    __SCREAMING_SNAKE_CASE = n - 1
    __SCREAMING_SNAKE_CASE = 0
    while d % 2 == 0:
        d /= 2
        exp += 1

    # n - 1 = d * (2**exp)
    __SCREAMING_SNAKE_CASE = 0
    while count < prec:
        __SCREAMING_SNAKE_CASE = random.randint(2 , n - 1 )
        __SCREAMING_SNAKE_CASE = bin_exp_mod(__snake_case , __snake_case , __snake_case )
        if b != 1:
            __SCREAMING_SNAKE_CASE = True
            for _ in range(__snake_case ):
                if b == n - 1:
                    __SCREAMING_SNAKE_CASE = False
                    break
                __SCREAMING_SNAKE_CASE = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    _snake_case : int = abs(int(input('Enter bound : ').strip()))
    print('Here\'s the list of primes:')
    print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
693
0
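The routine above is the Miller-Rabin probabilistic primality test; is_prime_big is the name its own __main__ block uses. Two worked checks under that de-obfuscated reading:

assert is_prime_big(104_729) is True   # the 10,000th prime
assert is_prime_big(561) is False      # 561 = 3 * 11 * 17, a Carmichael number that base-2 Fermat tests miss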
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available _lowerCAmelCase : Union[str, Any] = { '''configuration_audio_spectrogram_transformer''': [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ASTConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[int] = [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ASTForAudioClassification''', '''ASTModel''', '''ASTPreTrainedModel''', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = ['''ASTFeatureExtractor'''] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
46
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey


def _A ( __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int ) -> np.ndarray:
    """simple docstring"""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        __SCREAMING_SNAKE_CASE = ksize + 1
    __SCREAMING_SNAKE_CASE = np.zeros((ksize, ksize) , dtype=np.floataa )

    # each value
    for y in range(__snake_case ):
        for x in range(__snake_case ):
            # distance from center
            __SCREAMING_SNAKE_CASE = x - ksize // 2
            __SCREAMING_SNAKE_CASE = y - ksize // 2

            # degree to radiant
            __SCREAMING_SNAKE_CASE = theta / 180 * np.pi
            __SCREAMING_SNAKE_CASE = np.cos(_theta )
            __SCREAMING_SNAKE_CASE = np.sin(_theta )

            # get kernel x
            __SCREAMING_SNAKE_CASE = cos_theta * px + sin_theta * py

            # get kernel y
            __SCREAMING_SNAKE_CASE = -sin_theta * px + cos_theta * py

            # fill kernel
            __SCREAMING_SNAKE_CASE = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    _snake_case : Union[str, Any] = imread('../image_data/lena.jpg')
    # turn image in gray scale value
    _snake_case : List[str] = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    _snake_case : int = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 1_20, 1_50]:
        _snake_case : List[str] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_aa)
    _snake_case : Optional[Any] = out / out.max() * 2_55
    _snake_case : Union[str, Any] = out.astype(np.uinta)

    imshow('Original', gray)
    imshow('Gabor filter with 20x20 mask and 6 directions', out)

    waitKey(0)
693
0
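A hedged sanity check for the kernel above, assuming the de-obfuscated signature gabor_filter_kernel(ksize, sigma, theta, lambd, psi, gamma) implied by its own __main__ block: with psi=0 the centre pixel, where the rotated coordinates _x and _y are both 0, reduces to exp(0) * cos(0) == 1.

import numpy as np

kernel = gabor_filter_kernel(11, 8, 0, 10, 0, 0)  # odd ksize, theta=0, psi=0 (values illustrative)
assert np.isclose(kernel[5, 5], 1.0)              # centre of an 11x11 kernel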
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


SCREAMING_SNAKE_CASE__ = {
    '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = [
        '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''NezhaForNextSentencePrediction''',
        '''NezhaForMaskedLM''',
        '''NezhaForPreTraining''',
        '''NezhaForMultipleChoice''',
        '''NezhaForQuestionAnswering''',
        '''NezhaForSequenceClassification''',
        '''NezhaForTokenClassification''',
        '''NezhaModel''',
        '''NezhaPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
47
def sylvester(number: int) -> int:
    """Return the n-th number in Sylvester's sequence: 2, 3, 7, 43, 1807, ..."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
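The recurrence s(n) = s(n-1)^2 - s(n-1) + 1 can be sanity-checked against the first few known terms; a small sketch (my addition):

# first terms of Sylvester's sequence (OEIS A000058)
expected = [2, 3, 7, 43, 1807]
assert [sylvester(n) for n in range(1, 6)] == expected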
693
0
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list, sort: list) -> list:
    """Perform a depth-first-search based topological sort."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited, add current to sort
    sort.append(current)
    # if all vertices haven't been visited, select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
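Note that appending each vertex only after its neighbours yields a dependencies-first ordering, i.e. the reverse of the usual topological order. A quick check (my sketch, reusing the module-level graph above):

# for the graph above, every edge u -> v has v appearing before u
result = topological_sort("a", [], [])
assert result == ["c", "d", "e", "b", "a"]
for u, targets in edges.items():
    for v in targets:
        assert result.index(v) < result.index(u)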
48
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]: pass @is_pipeline_test @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @require_torch def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(_a ), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], ], ) @require_tf def __lowerCAmelCase ( self ) -> Any: __SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], [ {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, {"score": 0.333, "label": ANY(_a )}, ], ], ) @slow @require_torch def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = 
image_classifier(_a, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(_a ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes __SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(_a ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) __SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(_a ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, )
693
0
"""simple docstring""" from typing import Any def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ): _validation( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) # Creates data structures and fill initial step __UpperCAmelCase = {} __UpperCAmelCase = {} for state in states_space: __UpperCAmelCase = observations_space[0] __UpperCAmelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) __UpperCAmelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case_ ) ): __UpperCAmelCase = observations_space[o] __UpperCAmelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state # Update probabilities and pointers dicts __UpperCAmelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) __UpperCAmelCase = arg_max # The final observation __UpperCAmelCase = observations_space[len(snake_case_ ) - 1] # argmax for given final observation __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = probabilities[(k_state, final_observation)] if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state __UpperCAmelCase = arg_max # Process pointers backwards __UpperCAmelCase = last_state __UpperCAmelCase = [] for o in range(len(snake_case_ ) - 1 , -1 , -1 ): result.append(snake_case_ ) __UpperCAmelCase = pointers[previous, observations_space[o]] result.reverse() return result def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_not_empty( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) _validate_lists(snake_case_ , snake_case_ ) _validate_dicts( snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any ): _validate_list(snake_case_ , '''observations_space''' ) _validate_list(snake_case_ , '''states_space''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list''' raise ValueError(snake_case_ ) else: for x in _object: if not isinstance(snake_case_ , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list of strings''' raise ValueError(snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ ) _validate_nested_dict(snake_case_ , '''transition_probabilities''' ) _validate_nested_dict(snake_case_ , '''emission_probabilities''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): _validate_dict(_object , snake_case_ , snake_case_ ) for x in 
_object.values(): _validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a dict''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ): __UpperCAmelCase = F'''{var_name} all keys must be strings''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ): __UpperCAmelCase = '''nested dictionary ''' if nested else '''''' __UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}''' raise ValueError(snake_case_ ) if __name__ == "__main__": from doctest import testmod testmod()
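A worked example with the classic healthy/fever HMM (my illustration; the probabilities are the textbook ones, not taken from this file):

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# most likely hidden-state path for the three observations
print(viterbi(observations, states, start_p, trans_p, emit_p))
# -> ['Healthy', 'Healthy', 'Fever']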
49
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player (maximizer or minimizer)."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
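Beyond the built-in demo, a minimal hand-checkable case (my addition): with four leaves the maximiser picks the better of the two minimiser results.

import math

scores = [3, 5, 2, 9]
height = math.log(len(scores), 2)  # a complete tree of depth 2
# root (max) sees min(3, 5) = 3 and min(2, 9) = 2, so the optimum is 3
assert minimax(0, 0, True, scores, height) == 3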
693
0
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCamelCase : Dict = logging.get_logger(__name__) enable_full_determinism() class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = UNetaDModel _UpperCamelCase = 'sample' @property def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 3 lowerCamelCase__ = (32, 32) lowerCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([10] ).to(_lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def UpperCamelCase_ ( self ): return (3, 32, 32) @property def UpperCamelCase_ ( self ): return (3, 32, 32) def UpperCamelCase_ ( self ): lowerCamelCase__ = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } lowerCamelCase__ = self.dummy_input return init_dict, inputs_dict class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = UNetaDModel _UpperCamelCase = 'sample' @property def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 4 lowerCamelCase__ = (32, 32) lowerCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([10] ).to(_lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def UpperCamelCase_ ( self ): return (4, 32, 32) @property def UpperCamelCase_ ( self ): return (4, 32, 32) def UpperCamelCase_ ( self ): lowerCamelCase__ = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } lowerCamelCase__ = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ,output_loading_info=_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 ) model.to(_lowerCAmelCase ) lowerCamelCase__ = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" ,"""This test is supposed to run on GPU""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ,output_loading_info=_lowerCAmelCase ) model.to(_lowerCAmelCase ) lowerCamelCase__ = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" ,"""This test is supposed to run on GPU""" ) def UpperCamelCase_ ( self ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ,output_loading_info=_lowerCAmelCase ) model_accelerate.to(_lowerCAmelCase ) model_accelerate.eval() lowerCamelCase__ = 
torch.randn( 1 ,model_accelerate.config.in_channels ,model_accelerate.config.sample_size ,model_accelerate.config.sample_size ,generator=torch.manual_seed(0 ) ,) lowerCamelCase__ = noise.to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(_lowerCAmelCase ) lowerCamelCase__ = model_accelerate(_lowerCAmelCase ,_lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" ,output_loading_info=_lowerCAmelCase ,low_cpu_mem_usage=_lowerCAmelCase ) model_normal_load.to(_lowerCAmelCase ) model_normal_load.eval() lowerCamelCase__ = model_normal_load(_lowerCAmelCase ,_lowerCAmelCase )["""sample"""] assert torch_all_close(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-3 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(_lowerCAmelCase ) lowerCamelCase__ = torch.randn( 1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,) lowerCamelCase__ = noise.to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(_lowerCAmelCase ) with torch.no_grad(): lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ).sample lowerCamelCase__ = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off lowerCamelCase__ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] ) # fmt: on self.assertTrue(torch_all_close(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-3 ) ) class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = UNetaDModel _UpperCamelCase = 'sample' @property def UpperCamelCase_ ( self ,_lowerCAmelCase=(32, 32) ): lowerCamelCase__ = 4 lowerCamelCase__ = 3 lowerCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa ,device=_lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def UpperCamelCase_ ( self ): return (3, 32, 32) @property def UpperCamelCase_ ( self ): return (3, 32, 32) def UpperCamelCase_ ( self ): lowerCamelCase__ = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1E-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } lowerCamelCase__ = self.dummy_input return init_dict, inputs_dict @slow def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ,output_loading_info=_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 ) model.to(_lowerCAmelCase ) lowerCamelCase__ = self.dummy_input lowerCamelCase__ = floats_tensor((4, 3) + (2_56, 2_56) ).to(_lowerCAmelCase ) lowerCamelCase__ = noise lowerCamelCase__ = model(**_lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" 
) model.to(_lowerCAmelCase ) lowerCamelCase__ = 4 lowerCamelCase__ = 3 lowerCamelCase__ = (2_56, 2_56) lowerCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor(batch_size * [1E-4] ).to(_lowerCAmelCase ) with torch.no_grad(): lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ).sample lowerCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off lowerCamelCase__ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] ) # fmt: on self.assertTrue(torch_all_close(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-2 ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(_lowerCAmelCase ) lowerCamelCase__ = 4 lowerCamelCase__ = 3 lowerCamelCase__ = (32, 32) lowerCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) lowerCamelCase__ = torch.tensor(batch_size * [1E-4] ).to(_lowerCAmelCase ) with torch.no_grad(): lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ).sample lowerCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off lowerCamelCase__ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] ) # fmt: on self.assertTrue(torch_all_close(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-2 ) ) def UpperCamelCase_ ( self ): # not required for this model pass
50
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
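A round-trip sketch (my addition) showing the two helpers are inverses on valid input:

encoded = base16_encode(b"Hello")
assert encoded == "48656C6C6F"
assert base16_decode(encoded) == b"Hello"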
693
0
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def __snake_case ( ) -> int: """simple docstring""" raise RuntimeError('''CUDA out of memory.''' ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Dict ): super().__init__() UpperCAmelCase = nn.Linear(3 , 4 ) UpperCAmelCase = nn.BatchNormad(4 ) UpperCAmelCase = nn.Linear(4 , 5 ) def __snake_case ( self : Dict , a__ : Tuple ): return self.lineara(self.batchnorm(self.lineara(a__ ) ) ) class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(a__ : List[Any] ): nonlocal batch_sizes batch_sizes.append(a__ ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(a__ , [128, 64, 32, 16, 8] ) def __snake_case ( self : List[Any] ): UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(a__ : str , a__ : int ): nonlocal batch_sizes batch_sizes.append(a__ ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga UpperCAmelCase, UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(a__ , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def __snake_case ( self : Any ): @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(a__ : Dict ): pass with self.assertRaises(a__ ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def __snake_case ( self : int ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(a__ : str ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(a__ ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def __snake_case ( self : str ): @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(a__ : List[Any] , a__ : List[Any] , a__ : Union[str, Any] ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(a__ ) as cm: mock_training_loop_function(128 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def __snake_case ( self : Union[str, Any] ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(a__ : int ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(a__ ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def __snake_case ( self : Tuple ): UpperCAmelCase = torch.cuda.memory_allocated() UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , a__ ) UpperCAmelCase = release_memory(a__ ) self.assertEqual(torch.cuda.memory_allocated() , a__ )
51
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check if all elements in the list are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of n consecutive integers with n unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first group found by run(n)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
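The known small cases make a quick check (my addition): the first pair of consecutive integers with two distinct prime factors each is (14, 15), and the first such triple starts at 644.

assert unique_prime_factors(100) == {2, 5}
assert solution(2) == 14   # 14 = 2*7, 15 = 3*5
assert solution(3) == 644  # 644 = 2^2*7*23, 645 = 3*5*43, 646 = 2*17*19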
693
0
"""simple docstring""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def __A ( a_ :BertModel , a_ :str , a_ :str) -> str: __a : List[str] = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') __a : Any = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(a_): os.makedirs(a_) __a : List[Any] = model.state_dict() def to_tf_var_name(a_ :str): for patt, repl in iter(a_): __a : int = name.replace(a_ , a_) return F"""bert/{name}""" def create_tf_var(a_ :np.ndarray , a_ :str , a_ :tf.Session): __a : int = tf.dtypes.as_dtype(tensor.dtype) __a : Optional[int] = tf.get_variable(dtype=a_ , shape=tensor.shape , name=a_ , initializer=tf.zeros_initializer()) session.run(tf.variables_initializer([tf_var])) session.run(a_) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: __a : Any = to_tf_var_name(a_) __a : Tuple = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose): __a : List[Any] = torch_tensor.T __a : Optional[Any] = create_tf_var(tensor=a_ , name=a_ , session=a_) tf.keras.backend.set_value(a_ , a_) __a : int = session.run(a_) print(F"""Successfully created {tf_name}: {np.allclose(a_ , a_)}""") __a : Tuple = tf.train.Saver(tf.trainable_variables()) saver.save(a_ , os.path.join(a_ , model_name.replace('''-''' , '''_''') + '''.ckpt''')) def __A ( a_ :int=None) -> str: __a : str = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=a_ , required=a_ , help='''model name e.g. bert-base-uncased''') parser.add_argument( '''--cache_dir''' , type=a_ , default=a_ , required=a_ , help='''Directory containing pytorch model''') parser.add_argument('''--pytorch_model_path''' , type=a_ , required=a_ , help='''/path/to/<pytorch-model-name>.bin''') parser.add_argument('''--tf_cache_dir''' , type=a_ , required=a_ , help='''Directory in which to save tensorflow model''') __a : Optional[Any] = parser.parse_args(a_) __a : Optional[Any] = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=a_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name) if __name__ == "__main__": main()
52
import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _A ( __snake_case :Dict ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = VideoMAEConfig() set_architecture_configs(__snake_case , __snake_case ) if "finetuned" not in model_name: __SCREAMING_SNAKE_CASE = False if "finetuned" in model_name: __SCREAMING_SNAKE_CASE = "huggingface/label-files" if "kinetics" in model_name: __SCREAMING_SNAKE_CASE = 400 __SCREAMING_SNAKE_CASE = "kinetics400-id2label.json" elif "ssv2" in model_name: __SCREAMING_SNAKE_CASE = 174 __SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." ) __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) ) __SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]: """simple docstring""" if "small" in model_name: __SCREAMING_SNAKE_CASE = 384 __SCREAMING_SNAKE_CASE = 1536 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = 192 __SCREAMING_SNAKE_CASE = 768 elif "large" in model_name: __SCREAMING_SNAKE_CASE = 1024 __SCREAMING_SNAKE_CASE = 4096 __SCREAMING_SNAKE_CASE = 24 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 8 __SCREAMING_SNAKE_CASE = 512 __SCREAMING_SNAKE_CASE = 2048 elif "huge" in model_name: __SCREAMING_SNAKE_CASE = 1280 __SCREAMING_SNAKE_CASE = 5120 __SCREAMING_SNAKE_CASE = 32 __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 8 __SCREAMING_SNAKE_CASE = 640 __SCREAMING_SNAKE_CASE = 2560 elif "base" not in model_name: raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" ) def _A ( __snake_case :List[Any] ) -> Optional[int]: """simple docstring""" if "encoder." in name: __SCREAMING_SNAKE_CASE = name.replace("encoder." 
, "" ) if "cls_token" in name: __SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" ) if "decoder_pos_embed" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" ) if "pos_embed" in name and "decoder" not in name: __SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" ) if "patch_embed.proj" in name: __SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" ) if "decoder.blocks" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" ) if "blocks" in name: __SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" ) if "attn.proj" in name: __SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name and "bias" not in name: __SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" ) if "attn" in name: __SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" ) if "norm1" in name: __SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" ) if "decoder_embed" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" ) if "decoder_norm" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" ) if "decoder_pred" in name: __SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: __SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: __SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" ) if "head" in name and "decoder" not in name: __SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" ) return name def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case ) if key.startswith("encoder." ): __SCREAMING_SNAKE_CASE = key.replace("encoder." , "" ) if "qkv" in key: __SCREAMING_SNAKE_CASE = key.split("." ) if key.startswith("decoder.blocks" ): __SCREAMING_SNAKE_CASE = config.decoder_hidden_size __SCREAMING_SNAKE_CASE = int(key_split[2] ) __SCREAMING_SNAKE_CASE = "decoder.decoder_layers." if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = config.hidden_size __SCREAMING_SNAKE_CASE = int(key_split[1] ) __SCREAMING_SNAKE_CASE = "videomae.encoder.layer." 
if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = val return orig_state_dict def _A ( ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" ) __SCREAMING_SNAKE_CASE = np.load(__snake_case ) return list(__snake_case ) def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case ) if "finetuned" in model_name: __SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case ) else: __SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case ) # download original checkpoint, hosted on Google Drive __SCREAMING_SNAKE_CASE = "pytorch_model.bin" gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case ) __SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" ) if "model" in files: __SCREAMING_SNAKE_CASE = files["model"] else: __SCREAMING_SNAKE_CASE = files["module"] __SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case ) model.load_state_dict(__snake_case ) model.eval() # verify model on basic input __SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) __SCREAMING_SNAKE_CASE = prepare_video() __SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" ) if "finetuned" not in model_name: __SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" ) __SCREAMING_SNAKE_CASE = torch.load(__snake_case ) __SCREAMING_SNAKE_CASE = model(**__snake_case ) __SCREAMING_SNAKE_CASE = outputs.logits __SCREAMING_SNAKE_CASE = [ "videomae-small-finetuned-kinetics", "videomae-small-finetuned-ssv2", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) "videomae-base-short", "videomae-base-short-finetuned-kinetics", "videomae-base", "videomae-base-finetuned-kinetics", "videomae-large", "videomae-large-finetuned-kinetics", "videomae-huge-finetuned-kinetics", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) "videomae-base-short-ssv2", "videomae-base-short-finetuned-ssv2", "videomae-base-ssv2", "videomae-base-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] ) elif model_name == "videomae-small-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 174] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] ) elif model_name == "videomae-base": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] ) elif model_name == "videomae-base-short": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] ) # we verified the loss both for normalized and unnormalized targets for this one __SCREAMING_SNAKE_CASE = 
torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] ) elif model_name == "videomae-large": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] ) elif model_name == "videomae-large-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] ) elif model_name == "videomae-huge-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] ) elif model_name == "videomae-base-short-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] ) elif model_name == "videomae-base-finetuned-kinetics": __SCREAMING_SNAKE_CASE = torch.Size([1, 400] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ) elif model_name == "videomae-base-short-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] ) elif model_name == "videomae-base-short-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 174] ) __SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] ) elif model_name == "videomae-base-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] ) elif model_name == "videomae-base-finetuned-ssv2": __SCREAMING_SNAKE_CASE = torch.Size([1, 174] ) __SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] ) else: raise ValueError(f'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 ) else: print("Logits:" , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 ) print("Logits ok!" ) # verify loss, if applicable if model_name == "videomae-base-short": __SCREAMING_SNAKE_CASE = outputs.loss assert torch.allclose(__snake_case , __snake_case , atol=1e-4 ) print("Loss ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__snake_case ) model.save_pretrained(__snake_case ) if push_to_hub: print("Pushing to the hub..." ) model.push_to_hub(__snake_case , organization="nielsr" ) if __name__ == "__main__": _snake_case : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4', type=str, help=( 'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct' ' download link.' 
), ) parser.add_argument( '--pytorch_dump_folder_path', default='/Users/nielsrogge/Documents/VideoMAE/Test', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the ๐Ÿค— hub.' ) _snake_case : Optional[int] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
693
0
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _snake_case : Union[str, Any] = 16 _snake_case : Optional[Any] = 32 def a_ ( lowerCAmelCase_ : Accelerator, lowerCAmelCase_ : int = 16 ): __lowerCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' ) __lowerCAmelCase = load_dataset('glue', 'mrpc' ) def tokenize_function(lowerCAmelCase_ : Any ): # max_length=None => use the model max length (it's actually the default) __lowerCAmelCase = tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowerCAmelCase_, max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCAmelCase = datasets.map( lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=['idx', 'sentence1', 'sentence2'], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCAmelCase = tokenized_datasets.rename_column('label', 'labels' ) def collate_fn(lowerCAmelCase_ : str ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCAmelCase = 16 elif accelerator.mixed_precision != "no": __lowerCAmelCase = 8 else: __lowerCAmelCase = None return tokenizer.pad( lowerCAmelCase_, padding='longest', max_length=lowerCAmelCase_, pad_to_multiple_of=lowerCAmelCase_, return_tensors='pt', ) # Instantiate dataloaders. 
__lowerCAmelCase = DataLoader( tokenized_datasets['train'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ ) __lowerCAmelCase = DataLoader( tokenized_datasets['validation'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders _snake_case : Union[str, Any] = mocked_dataloaders # noqa: F811 def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Dict ): # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', lowerCAmelCase_ ) == "1": __lowerCAmelCase = 2 # Initialize accelerator __lowerCAmelCase = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCAmelCase = config['lr'] __lowerCAmelCase = int(config['num_epochs'] ) __lowerCAmelCase = int(config['seed'] ) __lowerCAmelCase = int(config['batch_size'] ) __lowerCAmelCase = evaluate.load('glue', 'mrpc' ) # If the batch size is too big we use gradient accumulation __lowerCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __lowerCAmelCase = batch_size // MAX_GPU_BATCH_SIZE __lowerCAmelCase = MAX_GPU_BATCH_SIZE set_seed(lowerCAmelCase_ ) __lowerCAmelCase , __lowerCAmelCase = get_dataloaders(lowerCAmelCase_, lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCAmelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCAmelCase = AdamW(params=model.parameters(), lr=lowerCAmelCase_ ) # Instantiate scheduler __lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_, num_warmup_steps=100, num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __lowerCAmelCase = model(**lowerCAmelCase_ ) __lowerCAmelCase = outputs.loss __lowerCAmelCase = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() __lowerCAmelCase = 0 for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __lowerCAmelCase = model(**lowerCAmelCase_ ) __lowerCAmelCase = outputs.logits.argmax(dim=-1 ) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather((predictions, batch['labels']) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(lowerCAmelCase_ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples __lowerCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __lowerCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=lowerCAmelCase_, references=lowerCAmelCase_, ) __lowerCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""", lowerCAmelCase_ ) def a_ ( ): __lowerCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision', type=lowerCAmelCase_, default=lowerCAmelCase_, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.', ) parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' ) __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(lowerCAmelCase_, lowerCAmelCase_ ) if __name__ == "__main__": main()
53
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
693
0
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
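A small usage sketch (my addition, using the public transformers import path): the num_hidden_layers property simply sums encoder and decoder depth.

from transformers import XLMProphetNetConfig

config = XLMProphetNetConfig(num_encoder_layers=12, num_decoder_layers=12)
assert config.num_hidden_layers == 24  # property: encoder + decoder layers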
54
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers below n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
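A quick check against the classic amicable pair (my addition); Project Euler problem 21's published answer for n = 10000 is 31626.

assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220
assert solution(10000) == 31626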
693
0
import copy import re class UpperCAmelCase : '''simple docstring''' snake_case_ = "hp" snake_case_ = {} snake_case_ = None @classmethod def UpperCamelCase_ ( cls : Optional[Any] ,A : List[str] ,A : List[str] ): __A = prefix __A = defaults cls.build_naming_info() @staticmethod def UpperCamelCase_ ( A : str ,A : Optional[int] ): if len(A ) == 0: return "" __A = None if any(char.isdigit() for char in word ): raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A ) + 1 ): __A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: __A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A : Union[str, Any] ): __A = "" while integer != 0: __A = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s __A = 0 while True: __A = word + "#" + int_to_alphabetic(A ) if sword in info["reverse_short_word"]: continue else: __A = sword break __A = short_word __A = word return short_word @staticmethod def UpperCamelCase_ ( A : Tuple ,A : str ): __A = param_name.split("_" ) __A = [TrialShortNamer.shortname_for_word(A ,A ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name __A = ["", "_"] for separator in separators: __A = separator.join(A ) if shortname not in info["reverse_short_param"]: __A = shortname __A = param_name return shortname return param_name @staticmethod def UpperCamelCase_ ( A : Union[str, Any] ,A : List[Any] ): __A = TrialShortNamer.shortname_for_key(A ,A ) __A = short_name __A = param_name @classmethod def UpperCamelCase_ ( cls : List[Any] ): if cls.NAMING_INFO is not None: return __A = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } __A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A ,A ) __A = info @classmethod def UpperCamelCase_ ( cls : Union[str, Any] ,A : int ): cls.build_naming_info() assert cls.PREFIX is not None __A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue __A = cls.NAMING_INFO["short_param"][k] if isinstance(A ,A ): __A = 1 if v else 0 __A = "" if isinstance(A ,(int, float) ) else "-" __A = f'''{key}{sep}{v}''' name.append(A ) return "_".join(A ) @classmethod def UpperCamelCase_ ( cls : List[str] ,A : Tuple ): __A = repr[len(cls.PREFIX ) + 1 :] if repr == "": __A = [] else: __A = repr.split("_" ) __A = {} for value in values: if "-" in value: __A , __A = value.split("-" ) else: __A = re.sub("[0-9.]" ,"" ,A ) __A = float(re.sub("[^0-9.]" ,"" ,A ) ) __A = cls.NAMING_INFO["reverse_short_param"][p_k] __A = p_v for k in cls.DEFAULTS: if k not in parameters: __A = cls.DEFAULTS[k] return parameters
55
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
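Two spot checks (my addition) that agree with the formulas above:

# 3.1 mol in 0.31 L is 10 M; with an n-factor of 2 that is 20 N
assert molarity_to_normality(2, 3.1, 0.31) == 20
# P = nRT / V = 3 * 0.0821 * 300 / 0.82, rounded to 90 atm
assert moles_to_pressure(0.82, 3, 300) == 90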
693
0
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
56
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __SCREAMING_SNAKE_CASE : def __init__( self, _a, _a=99, _a=13, _a=7, _a=9, _a=True, _a=True, _a=False, _a=32, _a=5, _a=4, _a=37, _a=8, _a=0.1, _a=0.002, _a=1, _a=0, _a=0, _a=None, _a=None, ) -> Optional[int]: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = encoder_seq_length __SCREAMING_SNAKE_CASE = decoder_seq_length # For common tests __SCREAMING_SNAKE_CASE = self.decoder_seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_attention_mask __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = d_ff __SCREAMING_SNAKE_CASE = relative_attention_num_buckets __SCREAMING_SNAKE_CASE = dropout_rate __SCREAMING_SNAKE_CASE = initializer_factor __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = decoder_start_token_id __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = decoder_layers def __lowerCAmelCase ( self ) -> Optional[int]: return TaConfig.from_pretrained("google/umt5-base" ) def __lowerCAmelCase ( self, _a, _a, _a, _a=None, _a=None, _a=None, _a=None, _a=None, ) -> int: if attention_mask is None: __SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=_a ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=_a ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = torch.ones( config.num_decoder_layers, config.num_attention_heads, device=_a ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 ) __SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 ) __SCREAMING_SNAKE_CASE = 
self.get_config() __SCREAMING_SNAKE_CASE = config.num_attention_heads __SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(_a, _a, _a ) return config, input_dict def __lowerCAmelCase ( self ) -> List[str]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() return config, inputs_dict def __lowerCAmelCase ( self ) -> Optional[int]: return TaConfig( vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def __lowerCAmelCase ( self ) -> Union[str, Any]: return TaConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ) model.to(_a ) model.eval() __SCREAMING_SNAKE_CASE = model( input_ids=_a, decoder_input_ids=_a, attention_mask=_a, decoder_attention_mask=_a, ) __SCREAMING_SNAKE_CASE = model(input_ids=_a, decoder_input_ids=_a ) __SCREAMING_SNAKE_CASE = result.last_hidden_state __SCREAMING_SNAKE_CASE = result.past_key_values __SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(_a ), config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ), 4 ) def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Tuple: __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).get_decoder().to(_a ).eval() # first forward pass __SCREAMING_SNAKE_CASE = model(_a, use_cache=_a ) __SCREAMING_SNAKE_CASE = model(_a ) __SCREAMING_SNAKE_CASE = model(_a, use_cache=_a ) self.parent.assertTrue(len(_a ) == len(_a ) ) self.parent.assertTrue(len(_a ) == len(_a ) + 1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1), config.vocab_size ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 ) __SCREAMING_SNAKE_CASE = model(_a )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(_a, past_key_values=_a )["last_hidden_state"] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = 
output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) ) def __lowerCAmelCase ( self, _a, _a, ) -> Optional[int]: __SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).to(_a ).half().eval() __SCREAMING_SNAKE_CASE = model(**_a )["last_hidden_state"] self.parent.assertFalse(torch.isnan(_a ).any().item() ) @require_torch class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE__ =( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ =(UMTaForConditionalGeneration,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ =( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =False SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =True # The small UMT5 model needs higher percentages for CPU/MP tests SCREAMING_SNAKE_CASE__ =[0.8, 0.9] def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = UMTaModelTester(self ) @unittest.skip("Test has a segmentation fault on torch 1.8.0" ) def __lowerCAmelCase ( self ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( _a, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=_a, opset_version=9, input_names=["input_ids", "decoder_input_ids"], ) @unittest.skipIf(torch_device == "cpu", "Cant do half precision" ) def __lowerCAmelCase ( self ) -> str: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*_a ) def __lowerCAmelCase ( self ) -> Tuple: __SCREAMING_SNAKE_CASE = ["encoder_attentions", "decoder_attentions", "cross_attentions"] __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = config_and_inputs[0] __SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(_a ).eval() model.to(_a ) __SCREAMING_SNAKE_CASE = { "head_mask": torch.zeros(config.num_layers, config.num_heads, device=_a ), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ), } for attn_name, (name, mask) in zip(_a, head_masking.items() ): __SCREAMING_SNAKE_CASE = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __SCREAMING_SNAKE_CASE = torch.ones( config.num_decoder_layers, config.num_heads, device=_a ) __SCREAMING_SNAKE_CASE = model.generate( config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=_a, return_dict_in_generate=_a, **_a, ) # We check the state of decoder_attentions and cross_attentions just from the last step __SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 ) @unittest.skip("Does not 
work on the tiny model as we keep hitting edge cases." ) def __lowerCAmelCase ( self ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def __lowerCAmelCase ( self ) -> List[Any]: __SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a ) __SCREAMING_SNAKE_CASE = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] __SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids # fmt: off __SCREAMING_SNAKE_CASE = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(_a, _a ) __SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) ) __SCREAMING_SNAKE_CASE = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a ) self.assertEqual(_a, _a )
693
0
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class _lowerCAmelCase: """simple docstring""" a : List[str] =BlenderbotConfig a : Union[str, Any] ={} a : Any ='''gelu''' def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=9_9 , _lowerCamelCase=3_2 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=2_0 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=0 , ): UpperCamelCase_: List[str] = parent UpperCamelCase_: int = batch_size UpperCamelCase_: Optional[int] = seq_length UpperCamelCase_: Any = is_training UpperCamelCase_: Any = use_labels UpperCamelCase_: Dict = vocab_size UpperCamelCase_: Optional[Any] = hidden_size UpperCamelCase_: Tuple = num_hidden_layers UpperCamelCase_: List[str] = num_attention_heads UpperCamelCase_: List[Any] = intermediate_size UpperCamelCase_: List[str] = hidden_dropout_prob UpperCamelCase_: Optional[int] = attention_probs_dropout_prob UpperCamelCase_: Union[str, Any] = max_position_embeddings UpperCamelCase_: Optional[Any] = eos_token_id UpperCamelCase_: Union[str, Any] = pad_token_id UpperCamelCase_: Optional[Any] = bos_token_id def _a ( self ): UpperCamelCase_: List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCamelCase_: str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCamelCase_: int = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCamelCase_: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_: str = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCamelCase_: Optional[int] = prepare_blenderbot_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return config, inputs_dict def _a ( self , _lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: Dict = TFBlenderbotModel(config=_lowerCamelCase ).get_decoder() UpperCamelCase_: Any = inputs_dict['input_ids'] UpperCamelCase_: Dict = input_ids[:1, :] UpperCamelCase_: Dict = inputs_dict['attention_mask'][:1, :] UpperCamelCase_: Optional[Any] = inputs_dict['head_mask'] UpperCamelCase_: List[Any] = 1 # first forward pass UpperCamelCase_: Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , head_mask=_lowerCamelCase , use_cache=_lowerCamelCase ) UpperCamelCase_ ,UpperCamelCase_: 
List[Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase_: Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase_: int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCamelCase_: List[str] = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCamelCase_: int = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCamelCase_: int = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0] UpperCamelCase_: Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCamelCase_: Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCamelCase_: str = output_from_no_past[:, -3:, random_slice_idx] UpperCamelCase_: str = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_lowerCamelCase , _lowerCamelCase , rtol=1e-3 ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ) -> List[Any]: if attention_mask is None: UpperCamelCase_: List[Any] = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCamelCase_: Union[str, Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCamelCase_: Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCamelCase_: Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCamelCase_: Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" a : Optional[Any] =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () a : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else () a : Optional[Any] =( { '''conversational''': TFBlenderbotForConditionalGeneration, '''feature-extraction''': TFBlenderbotModel, '''summarization''': TFBlenderbotForConditionalGeneration, '''text2text-generation''': TFBlenderbotForConditionalGeneration, '''translation''': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) a : List[str] =True a : Optional[int] =False a : Tuple =False def _a ( self ): UpperCamelCase_: Any = TFBlenderbotModelTester(self ) UpperCamelCase_: int = ConfigTester(self , config_class=_lowerCamelCase ) def _a ( self ): self.config_tester.run_common_tests() def _a ( self ): UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase ) @require_tokenizers @require_tf class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" a : Union[str, Any] =['''My friends are cool but they eat too 
many carbs.'''] a : Union[str, Any] ='''facebook/blenderbot-400M-distill''' @cached_property def _a ( self ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def _a ( self ): UpperCamelCase_: Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def _a ( self ): UpperCamelCase_: Union[str, Any] = self.tokenizer(self.src_text , return_tensors='tf' ) UpperCamelCase_: List[Any] = self.model.generate( model_inputs.input_ids , ) UpperCamelCase_: List[str] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_lowerCamelCase )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
57
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel as a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
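# Example invocation (script file name and paths are placeholders, not taken from this row):
# python convert_bert_pytorch_checkpoint_to_original_tf.py \
#     --model_name bert-base-uncased \
#     --pytorch_model_path ./pytorch_model.bin \
#     --tf_cache_dir ./tf_ckpt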
693
0
"""simple docstring""" from __future__ import annotations __lowerCAmelCase : List[Any] = 10 def __lowerCAmelCase ( __UpperCamelCase : list[int] ): '''simple docstring''' snake_case_ : Optional[Any] = 1 snake_case_ : Any = max(__UpperCamelCase ) while placement <= max_digit: # declare and initialize empty buckets snake_case_ : list[list] = [[] for _ in range(__UpperCamelCase )] # split list_of_ints between the buckets for i in list_of_ints: snake_case_ : str = int((i / placement) % RADIX ) buckets[tmp].append(__UpperCamelCase ) # put each buckets' contents into list_of_ints snake_case_ : Optional[int] = 0 for b in range(__UpperCamelCase ): for i in buckets[b]: snake_case_ : str = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
58
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _snake_case : str = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ =["""input_values""", """padding_mask"""] def __init__( self, _a = 1, _a = 2_40_00, _a = 0.0, _a = None, _a = None, **_a, ) -> str: super().__init__(feature_size=_a, sampling_rate=_a, padding_value=_a, **_a ) __SCREAMING_SNAKE_CASE = chunk_length_s __SCREAMING_SNAKE_CASE = overlap @property def __lowerCAmelCase ( self ) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __lowerCAmelCase ( self ) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self, _a, _a = None, _a = False, _a = None, _a = None, _a = None, ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one." ) elif padding is None: # by default let's pad the inputs __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = bool( isinstance(_a, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(_a, dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_a, np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(_a, dtype=np.floataa ) elif isinstance(_a, np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_audio.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(_a ).T] # verify inputs are valid for idx, example in enumerate(_a ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = BatchFeature({"input_values": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: __SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: __SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length __SCREAMING_SNAKE_CASE = 
"max_length" else: __SCREAMING_SNAKE_CASE = input_values # normal padding on batch if padded_inputs is None: __SCREAMING_SNAKE_CASE = self.pad( _a, max_length=_a, truncation=_a, padding=_a, return_attention_mask=_a, ) if padding: __SCREAMING_SNAKE_CASE = padded_inputs.pop("attention_mask" ) __SCREAMING_SNAKE_CASE = [] for example in padded_inputs.pop("input_values" ): if self.feature_size == 1: __SCREAMING_SNAKE_CASE = example[..., None] input_values.append(example.T ) __SCREAMING_SNAKE_CASE = input_values if return_tensors is not None: __SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(_a ) return padded_inputs
693
0
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def lowerCAmelCase_ ( __a , __a , __a , __a ) -> Dict: """simple docstring""" lowerCamelCase__: Optional[Any] =original_name.split("." )[0] lowerCamelCase__: Any =key.split("." ) lowerCamelCase__: Optional[Any] =int(key_list[key_list.index(__a ) - 2] ) lowerCamelCase__: List[str] =int(key_list[key_list.index(__a ) - 1] ) lowerCamelCase__: Union[str, Any] =orig_block_num - offset lowerCamelCase__: List[str] =key.replace(F"""{orig_block_num}.{layer_num}.{original_name}""" , F"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def lowerCAmelCase_ ( __a ) -> List[str]: """simple docstring""" lowerCamelCase__: Union[str, Any] =OrderedDict() lowerCamelCase__ , lowerCamelCase__: int =0, 0 for key, value in state_dict.items(): if key.startswith("network" ): lowerCamelCase__: Union[str, Any] =key.replace("network" , "poolformer.encoder" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("bias" ) and "patch_embed" not in key: patch_emb_offset += 1 lowerCamelCase__: List[Any] =key[: key.find("proj" )] lowerCamelCase__: Optional[Any] =key.replace(__a , F"""patch_embeddings.{total_embed_found}.""" ) lowerCamelCase__: List[str] =key.replace("proj" , "projection" ) if key.endswith("bias" ): total_embed_found += 1 if "patch_embeddings" in key: lowerCamelCase__: Tuple ="poolformer.encoder." + key if "mlp.fc1" in key: lowerCamelCase__: Union[str, Any] =replace_key_with_offset(__a , __a , "mlp.fc1" , "output.conv1" ) if "mlp.fc2" in key: lowerCamelCase__: Optional[int] =replace_key_with_offset(__a , __a , "mlp.fc2" , "output.conv2" ) if "norm1" in key: lowerCamelCase__: Union[str, Any] =replace_key_with_offset(__a , __a , "norm1" , "before_norm" ) if "norm2" in key: lowerCamelCase__: List[str] =replace_key_with_offset(__a , __a , "norm2" , "after_norm" ) if "layer_scale_1" in key: lowerCamelCase__: str =replace_key_with_offset(__a , __a , "layer_scale_1" , "layer_scale_1" ) if "layer_scale_2" in key: lowerCamelCase__: Any =replace_key_with_offset(__a , __a , "layer_scale_2" , "layer_scale_2" ) if "head" in key: lowerCamelCase__: int =key.replace("head" , "classifier" ) lowerCamelCase__: List[str] =value return new_state_dict def lowerCAmelCase_ ( ) -> List[Any]: """simple docstring""" lowerCamelCase__: Optional[int] ="http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase__: Optional[int] =Image.open(requests.get(__a , stream=__a ).raw ) return image @torch.no_grad() def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =PoolFormerConfig() # set attributes based on model_name lowerCamelCase__: int ="huggingface/label-files" lowerCamelCase__: Any =model_name[-3:] lowerCamelCase__: int =1000 lowerCamelCase__: List[Any] ="imagenet-1k-id2label.json" lowerCamelCase__: Any =(1, 1000) # set config attributes lowerCamelCase__: Optional[Any] =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) lowerCamelCase__: Dict ={int(__a ): v for k, v in idalabel.items()} lowerCamelCase__: Optional[int] =idalabel lowerCamelCase__: int ={v: k for k, v in idalabel.items()} if size == "s12": 
lowerCamelCase__: Optional[int] =[2, 2, 6, 2] lowerCamelCase__: List[Any] =[64, 128, 320, 512] lowerCamelCase__: Optional[Any] =4.0 lowerCamelCase__: int =0.9 elif size == "s24": lowerCamelCase__: List[str] =[4, 4, 12, 4] lowerCamelCase__: str =[64, 128, 320, 512] lowerCamelCase__: Any =4.0 lowerCamelCase__: str =0.9 elif size == "s36": lowerCamelCase__: Any =[6, 6, 18, 6] lowerCamelCase__: Optional[int] =[64, 128, 320, 512] lowerCamelCase__: int =4.0 lowerCamelCase__: Dict =1e-6 lowerCamelCase__: Any =0.9 elif size == "m36": lowerCamelCase__: Union[str, Any] =[6, 6, 18, 6] lowerCamelCase__: Optional[Any] =[96, 192, 384, 768] lowerCamelCase__: Tuple =4.0 lowerCamelCase__: Union[str, Any] =1e-6 lowerCamelCase__: Optional[int] =0.9_5 elif size == "m48": lowerCamelCase__: Optional[Any] =[8, 8, 24, 8] lowerCamelCase__: str =[96, 192, 384, 768] lowerCamelCase__: Optional[int] =4.0 lowerCamelCase__: Dict =1e-6 lowerCamelCase__: Any =0.9_5 else: raise ValueError(F"""Size {size} not supported""" ) # load image processor lowerCamelCase__: str =PoolFormerImageProcessor(crop_pct=__a ) # Prepare image lowerCamelCase__: Optional[int] =prepare_img() lowerCamelCase__: Optional[int] =image_processor(images=__a , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict lowerCamelCase__: List[str] =torch.load(__a , map_location=torch.device("cpu" ) ) # rename keys lowerCamelCase__: List[Any] =rename_keys(__a ) # create HuggingFace model and load state dict lowerCamelCase__: List[str] =PoolFormerForImageClassification(__a ) model.load_state_dict(__a ) model.eval() # Define image processor lowerCamelCase__: Optional[int] =PoolFormerImageProcessor(crop_pct=__a ) lowerCamelCase__: Optional[int] =image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values # forward pass lowerCamelCase__: List[Any] =model(__a ) lowerCamelCase__: Any =outputs.logits # define expected logit slices for different models if size == "s12": lowerCamelCase__: Optional[int] =torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] ) elif size == "s24": lowerCamelCase__: Union[str, Any] =torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] ) elif size == "s36": lowerCamelCase__: Dict =torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] ) elif size == "m36": lowerCamelCase__: Tuple =torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] ) elif size == "m48": lowerCamelCase__: Dict =torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] ) else: raise ValueError(F"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , __a , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__a ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--model_name", default="poolformer_s12", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) __A = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
59
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
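# Illustrative usage sketch; the checkpoint name is an assumption, not taken from this file:
# sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# image = sde_ve(num_inference_steps=2000).images[0]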
693
0