Column schema of the dataset excerpt below (column name, dtype, observed value range):

| Column | Dtype | Range |
| --- | --- | --- |
| `code` | string | lengths 86 to 54.5k |
| `code_codestyle` | int64 | 0 to 371 |
| `style_context` | string | lengths 87 to 49.2k |
| `style_context_codestyle` | int64 | 0 to 349 |
| `label` | int64 | 0 to 1 |
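The rows below follow this schema in column order. As a minimal sketch of how a dataset with this schema is typically loaded and inspected with the `datasets` library (the Hub identifier `"user/code-style-dataset"` is a placeholder, since the actual dataset path is not given here):

```python
from datasets import load_dataset

# Placeholder identifier: substitute the real Hub path of this dataset.
ds = load_dataset("user/code-style-dataset", split="train")

row = ds[0]
# The integer columns are code-style class ids plus a binary label.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # preview of the flattened source sample
```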
Row 1, `code` column:

```python
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask

__A = logging.getLogger(__name__)


class __lowerCAmelCase(_lowerCamelCase):
    """simple docstring"""

    snake_case_ = '''token-classification'''

    def __init__(self, lowerCamelCase__) -> Any:
        '''simple docstring'''
        if type(lowerCamelCase__) == dict:
            __lowerCamelCase = Namespace(**lowerCamelCase__)
        __lowerCamelCase = import_module('tasks')
        try:
            __lowerCamelCase = getattr(lowerCamelCase__, hparams.task_type)
            __lowerCamelCase = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
                f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}"""
            )
        __lowerCamelCase = self.token_classification_task.get_labels(hparams.labels)
        __lowerCamelCase = CrossEntropyLoss().ignore_index
        super().__init__(lowerCamelCase__, len(self.labels), self.mode)

    def lowercase_(self, **lowerCamelCase__) -> Union[str, Any]:
        '''simple docstring'''
        return self.model(**lowerCamelCase__)

    def lowercase_(self, lowerCamelCase__, lowerCamelCase__) -> Union[str, Any]:
        '''simple docstring'''
        __lowerCamelCase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type != "distilbert":
            __lowerCamelCase = (
                batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        __lowerCamelCase = self(**lowerCamelCase__)
        __lowerCamelCase = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def lowercase_(self) -> int:
        '''simple docstring'''
        __lowerCamelCase = self.hparams
        for mode in ["train", "dev", "test"]:
            __lowerCamelCase = self._feature_file(lowerCamelCase__)
            if os.path.exists(lowerCamelCase__) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', lowerCamelCase__)
                __lowerCamelCase = torch.load(lowerCamelCase__)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                __lowerCamelCase = self.token_classification_task.read_examples_from_file(args.data_dir, lowerCamelCase__)
                __lowerCamelCase = self.token_classification_task.convert_examples_to_features(
                    lowerCamelCase__,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ['xlnet']),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=lowerCamelCase__,
                    pad_on_left=bool(self.config.model_type in ['xlnet']),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info('Saving features into cached file %s', lowerCamelCase__)
                torch.save(lowerCamelCase__, lowerCamelCase__)

    def lowercase_(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=False) -> Union[str, Any]:
        '''simple docstring'''
        __lowerCamelCase = self._feature_file(lowerCamelCase__)
        logger.info('Loading features from cached file %s', lowerCamelCase__)
        __lowerCamelCase = torch.load(lowerCamelCase__)
        __lowerCamelCase = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        __lowerCamelCase = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            __lowerCamelCase = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            __lowerCamelCase = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        __lowerCamelCase = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__), batch_size=lowerCamelCase__
        )

    def lowercase_(self, lowerCamelCase__, lowerCamelCase__) -> List[str]:
        '''simple docstring'''
        """Compute validation""" ""
        __lowerCamelCase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type != "distilbert":
            __lowerCamelCase = (
                batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        __lowerCamelCase = self(**lowerCamelCase__)
        __lowerCamelCase, __lowerCamelCase = outputs[:2]
        __lowerCamelCase = logits.detach().cpu().numpy()
        __lowerCamelCase = inputs['labels'].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def lowercase_(self, lowerCamelCase__) -> int:
        '''simple docstring'''
        __lowerCamelCase = torch.stack([x['val_loss'] for x in outputs]).mean()
        __lowerCamelCase = np.concatenate([x['pred'] for x in outputs], axis=0)
        __lowerCamelCase = np.argmax(lowerCamelCase__, axis=2)
        __lowerCamelCase = np.concatenate([x['target'] for x in outputs], axis=0)
        __lowerCamelCase = dict(enumerate(self.labels))
        __lowerCamelCase = [[] for _ in range(out_label_ids.shape[0])]
        __lowerCamelCase = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        __lowerCamelCase = {
            'val_loss': val_loss_mean,
            'accuracy_score': accuracy_score(lowerCamelCase__, lowerCamelCase__),
            'precision': precision_score(lowerCamelCase__, lowerCamelCase__),
            'recall': recall_score(lowerCamelCase__, lowerCamelCase__),
            'f1': fa_score(lowerCamelCase__, lowerCamelCase__),
        }
        __lowerCamelCase = dict(results.items())
        __lowerCamelCase = results
        return ret, preds_list, out_label_list

    def lowercase_(self, lowerCamelCase__) -> Any:
        '''simple docstring'''
        __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self._eval_end(lowerCamelCase__)
        __lowerCamelCase = ret['log']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def lowercase_(self, lowerCamelCase__) -> Dict:
        '''simple docstring'''
        __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self._eval_end(lowerCamelCase__)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        __lowerCamelCase = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def lowercase_(lowerCamelCase__, lowerCamelCase__) -> List[Any]:
        '''simple docstring'''
        BaseTransformer.add_model_specific_args(lowerCamelCase__, lowerCamelCase__)
        parser.add_argument(
            '--task_type', default='NER', type=lowerCamelCase__,
            help='Task type to fine tune in training (e.g. NER, POS, etc)'
        )
        parser.add_argument(
            '--max_seq_length',
            default=128,
            type=lowerCamelCase__,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument(
            '--labels',
            default='',
            type=lowerCamelCase__,
            help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.',
        )
        parser.add_argument(
            '--gpus',
            default=0,
            type=lowerCamelCase__,
            help='The number of GPUs allocated for this, it is by default 0 meaning none',
        )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
        )
        return parser


if __name__ == "__main__":
    __A = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    __A = NERTransformer.add_model_specific_args(parser, os.getcwd())
    __A = parser.parse_args()
    __A = NERTransformer(args)
    __A = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        __A = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        __A = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
```
code_codestyle: 353
Row 1, `style_context` column:

```python
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

__A = False


class __lowerCAmelCase(unittest.TestCase):
    """simple docstring"""

    def lowercase_(self) -> Union[str, Any]:
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowercase_(self) -> int:
        '''simple docstring'''
        return 12

    @property
    def lowercase_(self) -> Union[str, Any]:
        '''simple docstring'''
        return 12

    @property
    def lowercase_(self) -> List[str]:
        '''simple docstring'''
        return 32

    @property
    def lowercase_(self) -> Dict:
        '''simple docstring'''
        torch.manual_seed(0)
        __lowerCamelCase = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def lowercase_(self) -> List[Any]:
        '''simple docstring'''
        __lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def lowercase_(self) -> Union[str, Any]:
        '''simple docstring'''
        torch.manual_seed(0)
        __lowerCamelCase = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(lowerCamelCase__)

    @property
    def lowercase_(self) -> str:
        '''simple docstring'''
        torch.manual_seed(0)
        __lowerCamelCase = 12
        __lowerCamelCase = 12
        __lowerCamelCase = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        __lowerCamelCase = TransformeraDModel(**lowerCamelCase__)
        return model

    def lowercase_(self) -> List[str]:
        '''simple docstring'''
        __lowerCamelCase = 'cpu'
        __lowerCamelCase = self.dummy_vqvae
        __lowerCamelCase = self.dummy_text_encoder
        __lowerCamelCase = self.dummy_tokenizer
        __lowerCamelCase = self.dummy_transformer
        __lowerCamelCase = VQDiffusionScheduler(self.num_embed)
        __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase__)
        __lowerCamelCase = VQDiffusionPipeline(
            vqvae=lowerCamelCase__,
            text_encoder=lowerCamelCase__,
            tokenizer=lowerCamelCase__,
            transformer=lowerCamelCase__,
            scheduler=lowerCamelCase__,
            learned_classifier_free_sampling_embeddings=lowerCamelCase__,
        )
        __lowerCamelCase = pipe.to(lowerCamelCase__)
        pipe.set_progress_bar_config(disable=lowerCamelCase__)
        __lowerCamelCase = 'teddy bear playing in the pool'
        __lowerCamelCase = torch.Generator(device=lowerCamelCase__).manual_seed(0)
        __lowerCamelCase = pipe([prompt], generator=lowerCamelCase__, num_inference_steps=2, output_type='np')
        __lowerCamelCase = output.images
        __lowerCamelCase = torch.Generator(device=lowerCamelCase__).manual_seed(0)
        __lowerCamelCase = pipe(
            [prompt], generator=lowerCamelCase__, output_type='np', return_dict=lowerCamelCase__, num_inference_steps=2
        )[0]
        __lowerCamelCase = image[0, -3:, -3:, -1]
        __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __lowerCamelCase = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def lowercase_(self) -> Tuple:
        '''simple docstring'''
        __lowerCamelCase = 'cpu'
        __lowerCamelCase = self.dummy_vqvae
        __lowerCamelCase = self.dummy_text_encoder
        __lowerCamelCase = self.dummy_tokenizer
        __lowerCamelCase = self.dummy_transformer
        __lowerCamelCase = VQDiffusionScheduler(self.num_embed)
        __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(
            learnable=lowerCamelCase__, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )
        __lowerCamelCase = VQDiffusionPipeline(
            vqvae=lowerCamelCase__,
            text_encoder=lowerCamelCase__,
            tokenizer=lowerCamelCase__,
            transformer=lowerCamelCase__,
            scheduler=lowerCamelCase__,
            learned_classifier_free_sampling_embeddings=lowerCamelCase__,
        )
        __lowerCamelCase = pipe.to(lowerCamelCase__)
        pipe.set_progress_bar_config(disable=lowerCamelCase__)
        __lowerCamelCase = 'teddy bear playing in the pool'
        __lowerCamelCase = torch.Generator(device=lowerCamelCase__).manual_seed(0)
        __lowerCamelCase = pipe([prompt], generator=lowerCamelCase__, num_inference_steps=2, output_type='np')
        __lowerCamelCase = output.images
        __lowerCamelCase = torch.Generator(device=lowerCamelCase__).manual_seed(0)
        __lowerCamelCase = pipe(
            [prompt], generator=lowerCamelCase__, output_type='np', return_dict=lowerCamelCase__, num_inference_steps=2
        )[0]
        __lowerCamelCase = image[0, -3:, -3:, -1]
        __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __lowerCamelCase = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class __lowerCAmelCase(unittest.TestCase):
    """simple docstring"""

    def lowercase_(self) -> Optional[Any]:
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_(self) -> Optional[Any]:
        '''simple docstring'''
        __lowerCamelCase = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy'
        )
        __lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
        __lowerCamelCase = pipeline.to(lowerCamelCase__)
        pipeline.set_progress_bar_config(disable=lowerCamelCase__)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        __lowerCamelCase = torch.Generator(device=lowerCamelCase__).manual_seed(0)
        __lowerCamelCase = pipeline(
            'teddy bear playing in the pool',
            num_images_per_prompt=1,
            generator=lowerCamelCase__,
            output_type='np',
        )
        __lowerCamelCase = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
```
style_context_codestyle: 348
label: 0
Row 2, `code` column:

```python
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem

__A = importlib.util.find_spec("s3fs") is not None
if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

__A = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def lowerCamelCase_(UpperCamelCase__: str) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        __lowerCamelCase = dataset_path.split('://')[1]
    return dataset_path


def lowerCamelCase_(UpperCamelCase__: fsspec.AbstractFileSystem) -> bool:
    """simple docstring"""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def lowerCamelCase_(UpperCamelCase__: fsspec.AbstractFileSystem, UpperCamelCase__: str, UpperCamelCase__: str) -> str:
    """simple docstring"""
    __lowerCamelCase = not is_remote_filesystem(_lowerCAmelCase)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(_lowerCAmelCase), fs._strip_protocol(_lowerCAmelCase))
    else:
        fs.mv(_lowerCAmelCase, _lowerCAmelCase, recursive=_lowerCAmelCase)


def lowerCamelCase_() -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn, 'reset_lock'):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        __lowerCamelCase = None
        __lowerCamelCase = None
        __lowerCamelCase = threading.Lock()
```
code_codestyle: 354
Row 2, `style_context` column:

```python
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel

if is_vision_available():
    from transformers import MaskaFormerImageProcessor

if is_vision_available():
    from PIL import Image


class __lowerCAmelCase:
    """simple docstring"""

    def __init__(self, lowerCamelCase__, lowerCamelCase__=2, lowerCamelCase__=True, lowerCamelCase__=False,
                 lowerCamelCase__=10, lowerCamelCase__=3, lowerCamelCase__=32 * 8, lowerCamelCase__=32 * 8,
                 lowerCamelCase__=4, lowerCamelCase__=64) -> Optional[Any]:
        '''simple docstring'''
        __lowerCamelCase = parent
        __lowerCamelCase = batch_size
        __lowerCamelCase = is_training
        __lowerCamelCase = use_auxiliary_loss
        __lowerCamelCase = num_queries
        __lowerCamelCase = num_channels
        __lowerCamelCase = min_size
        __lowerCamelCase = max_size
        __lowerCamelCase = num_labels
        __lowerCamelCase = hidden_dim
        __lowerCamelCase = hidden_dim

    def lowercase_(self) -> Optional[int]:
        '''simple docstring'''
        __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(lowerCamelCase__)
        __lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size], device=lowerCamelCase__)
        __lowerCamelCase = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=lowerCamelCase__) > 0.5
        ).float()
        __lowerCamelCase = (torch.rand((self.batch_size, self.num_labels), device=lowerCamelCase__) > 0.5).long()
        __lowerCamelCase = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def lowercase_(self) -> Union[str, Any]:
        '''simple docstring'''
        __lowerCamelCase = MaskaFormerConfig(hidden_size=self.hidden_dim)
        __lowerCamelCase = self.num_queries
        __lowerCamelCase = self.num_labels
        __lowerCamelCase = [1, 1, 1, 1]
        __lowerCamelCase = self.num_channels
        __lowerCamelCase = 64
        __lowerCamelCase = 128
        __lowerCamelCase = self.hidden_dim
        __lowerCamelCase = self.hidden_dim
        __lowerCamelCase = self.hidden_dim
        return config

    def lowercase_(self) -> str:
        '''simple docstring'''
        __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.prepare_config_and_inputs()
        __lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict

    def lowercase_(self, lowerCamelCase__, lowerCamelCase__) -> Any:
        '''simple docstring'''
        __lowerCamelCase = output.encoder_hidden_states
        __lowerCamelCase = output.pixel_decoder_hidden_states
        __lowerCamelCase = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(lowerCamelCase__), len(config.backbone_config.depths))
        self.parent.assertTrue(len(lowerCamelCase__), len(config.backbone_config.depths))
        self.parent.assertTrue(len(lowerCamelCase__), config.decoder_layers)

    def lowercase_(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=False) -> Tuple:
        '''simple docstring'''
        with torch.no_grad():
            __lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__)
            model.to(lowerCamelCase__)
            model.eval()
            __lowerCamelCase = model(pixel_values=lowerCamelCase__, pixel_mask=lowerCamelCase__)
            __lowerCamelCase = model(lowerCamelCase__, output_hidden_states=lowerCamelCase__)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(lowerCamelCase__, lowerCamelCase__)

    def lowercase_(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__) -> Tuple:
        '''simple docstring'''
        __lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__)
        model.to(lowerCamelCase__)
        model.eval()

        def comm_check_on_output(lowerCamelCase__):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            __lowerCamelCase = model(pixel_values=lowerCamelCase__, pixel_mask=lowerCamelCase__)
            __lowerCamelCase = model(lowerCamelCase__)
            comm_check_on_output(lowerCamelCase__)
            __lowerCamelCase = model(
                pixel_values=lowerCamelCase__, pixel_mask=lowerCamelCase__,
                mask_labels=lowerCamelCase__, class_labels=lowerCamelCase__
            )
        comm_check_on_output(lowerCamelCase__)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class __lowerCAmelCase(__magic_name__, __magic_name__, unittest.TestCase):
    """simple docstring"""

    snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False

    def lowercase_(self) -> List[str]:
        '''simple docstring'''
        __lowerCamelCase = MaskaFormerModelTester(self)
        __lowerCamelCase = ConfigTester(self, config_class=lowerCamelCase__, has_text_modality=lowerCamelCase__)

    def lowercase_(self) -> Tuple:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def lowercase_(self) -> List[str]:
        '''simple docstring'''
        __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__, **lowerCamelCase__, output_hidden_states=lowerCamelCase__)

    def lowercase_(self) -> Optional[Any]:
        '''simple docstring'''
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__)

    @unittest.skip(reason='Mask2Former does not use inputs_embeds')
    def lowercase_(self) -> Any:
        '''simple docstring'''
        pass

    @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method')
    def lowercase_(self) -> Tuple:
        '''simple docstring'''
        pass

    @unittest.skip(reason='Mask2Former is not a generative model')
    def lowercase_(self) -> Optional[Any]:
        '''simple docstring'''
        pass

    @unittest.skip(reason='Mask2Former does not use token embeddings')
    def lowercase_(self) -> Optional[int]:
        '''simple docstring'''
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`'
    )
    def lowercase_(self) -> Dict:
        '''simple docstring'''
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def lowercase_(self) -> List[str]:
        '''simple docstring'''
        pass

    def lowercase_(self) -> List[str]:
        '''simple docstring'''
        __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase = model_class(lowerCamelCase__)
            __lowerCamelCase = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCamelCase = [*signature.parameters.keys()]
            __lowerCamelCase = ['pixel_values']
            self.assertListEqual(arg_names[:1], lowerCamelCase__)

    @slow
    def lowercase_(self) -> int:
        '''simple docstring'''
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            __lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__)
            self.assertIsNotNone(lowerCamelCase__)

    def lowercase_(self) -> Optional[Any]:
        '''simple docstring'''
        __lowerCamelCase = (self.model_tester.min_size,) * 2
        __lowerCamelCase = {
            'pixel_values': torch.randn((2, 3, *size), device=lowerCamelCase__),
            'mask_labels': torch.randn((2, 10, *size), device=lowerCamelCase__),
            'class_labels': torch.zeros(2, 10, device=lowerCamelCase__).long(),
        }
        __lowerCamelCase = self.model_tester.get_config()
        __lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__).to(lowerCamelCase__)
        __lowerCamelCase = model(**lowerCamelCase__)
        self.assertTrue(outputs.loss is not None)

    def lowercase_(self) -> Dict:
        '''simple docstring'''
        __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__, **lowerCamelCase__, output_hidden_states=lowerCamelCase__)

    def lowercase_(self) -> Optional[Any]:
        '''simple docstring'''
        __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase = model_class(lowerCamelCase__).to(lowerCamelCase__)
            __lowerCamelCase = model(**lowerCamelCase__, output_attentions=lowerCamelCase__)
            self.assertTrue(outputs.attentions is not None)

    def lowercase_(self) -> Optional[Any]:
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        __lowerCamelCase = self.all_model_classes[1]
        __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        __lowerCamelCase = model_class(lowerCamelCase__)
        model.to(lowerCamelCase__)
        model.train()
        __lowerCamelCase = model(lowerCamelCase__, mask_labels=lowerCamelCase__, class_labels=lowerCamelCase__).loss
        loss.backward()

    def lowercase_(self) -> Dict:
        '''simple docstring'''
        __lowerCamelCase = self.all_model_classes[1]
        __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        __lowerCamelCase = True
        __lowerCamelCase = True
        __lowerCamelCase = model_class(lowerCamelCase__).to(lowerCamelCase__)
        model.train()
        __lowerCamelCase = model(lowerCamelCase__, mask_labels=lowerCamelCase__, class_labels=lowerCamelCase__)
        __lowerCamelCase = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        __lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        __lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        __lowerCamelCase = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=lowerCamelCase__)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


__A = 1e-4


def lowerCamelCase_() -> List[Any]:
    """simple docstring"""
    __lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_vision
@slow
class __lowerCAmelCase(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def lowercase_(self) -> List[str]:
        '''simple docstring'''
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def lowercase_(self) -> Dict:
        '''simple docstring'''
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def lowercase_(self) -> Any:
        '''simple docstring'''
        __lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(lowerCamelCase__)
        __lowerCamelCase = self.default_image_processor
        __lowerCamelCase = prepare_img()
        __lowerCamelCase = image_processor(lowerCamelCase__, return_tensors='pt').to(lowerCamelCase__)
        __lowerCamelCase = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(lowerCamelCase__, (1, 3, 384, 384))
        with torch.no_grad():
            __lowerCamelCase = model(**lowerCamelCase__)
        __lowerCamelCase = torch.tensor(
            [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]
        ).to(lowerCamelCase__)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], lowerCamelCase__, atol=lowerCamelCase__)
        )
        __lowerCamelCase = torch.tensor(
            [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]
        ).to(lowerCamelCase__)
        self.assertTrue(
            torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], lowerCamelCase__, atol=lowerCamelCase__)
        )
        __lowerCamelCase = torch.tensor(
            [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]
        ).to(lowerCamelCase__)
        self.assertTrue(
            torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], lowerCamelCase__, atol=lowerCamelCase__)
        )

    def lowercase_(self) -> Union[str, Any]:
        '''simple docstring'''
        __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(lowerCamelCase__).eval()
        __lowerCamelCase = self.default_image_processor
        __lowerCamelCase = prepare_img()
        __lowerCamelCase = image_processor(lowerCamelCase__, return_tensors='pt').to(lowerCamelCase__)
        __lowerCamelCase = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(lowerCamelCase__, (1, 3, 384, 384))
        with torch.no_grad():
            __lowerCamelCase = model(**lowerCamelCase__)
        # masks_queries_logits
        __lowerCamelCase = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        __lowerCamelCase = [
            [-8.78_39, -9.00_56, -8.81_21],
            [-7.41_04, -7.03_13, -6.54_01],
            [-6.61_05, -6.34_27, -6.46_75],
        ]
        __lowerCamelCase = torch.tensor(lowerCamelCase__).to(lowerCamelCase__)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], lowerCamelCase__, atol=lowerCamelCase__))
        # class_queries_logits
        __lowerCamelCase = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        __lowerCamelCase = torch.tensor(
            [
                [1.83_24, -8.08_35, -4.19_22],
                [0.84_50, -9.00_50, -3.60_53],
                [0.30_45, -7.72_93, -3.02_75],
            ]
        ).to(lowerCamelCase__)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], lowerCamelCase__, atol=lowerCamelCase__))

    def lowercase_(self) -> str:
        '''simple docstring'''
        __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(lowerCamelCase__).eval()
        __lowerCamelCase = self.default_image_processor
        __lowerCamelCase = image_processor(
            [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)],
            return_tensors='pt',
        )
        __lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__)
        __lowerCamelCase = [el.to(lowerCamelCase__) for el in inputs['mask_labels']]
        __lowerCamelCase = [el.to(lowerCamelCase__) for el in inputs['class_labels']]
        with torch.no_grad():
            __lowerCamelCase = model(**lowerCamelCase__)
        self.assertTrue(outputs.loss is not None)
```
style_context_codestyle: 348
label: 0
Row 3, `code` column:

```python
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

__A = logging.get_logger(__name__)

__A = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class __lowerCamelCase(__magic_name__):
    """simple docstring"""

    snake_case_ = '''imagegpt'''
    snake_case_ = ['''past_key_values''']
    snake_case_ = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(self, lowerCamelCase__=512 + 1, lowerCamelCase__=32 * 32, lowerCamelCase__=512, lowerCamelCase__=24,
                 lowerCamelCase__=8, lowerCamelCase__=None, lowerCamelCase__="quick_gelu", lowerCamelCase__=0.1,
                 lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=1e-5, lowerCamelCase__=0.02,
                 lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=False,
                 lowerCamelCase__=False, **lowerCamelCase__) -> str:
        '''simple docstring'''
        __lowerCamelCase = vocab_size
        __lowerCamelCase = n_positions
        __lowerCamelCase = n_embd
        __lowerCamelCase = n_layer
        __lowerCamelCase = n_head
        __lowerCamelCase = n_inner
        __lowerCamelCase = activation_function
        __lowerCamelCase = resid_pdrop
        __lowerCamelCase = embd_pdrop
        __lowerCamelCase = attn_pdrop
        __lowerCamelCase = layer_norm_epsilon
        __lowerCamelCase = initializer_range
        __lowerCamelCase = scale_attn_weights
        __lowerCamelCase = use_cache
        __lowerCamelCase = scale_attn_by_inverse_layer_idx
        __lowerCamelCase = reorder_and_upcast_attn
        __lowerCamelCase = tie_word_embeddings
        super().__init__(tie_word_embeddings=lowerCamelCase_, **lowerCamelCase_)


class __lowerCamelCase(__magic_name__):
    """simple docstring"""

    @property
    def lowercase_(self) -> Tuple:
        '''simple docstring'''
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
            ]
        )

    def lowercase_(self, lowerCamelCase__, lowerCamelCase__=1, lowerCamelCase__=-1, lowerCamelCase__=False,
                   lowerCamelCase__=None, lowerCamelCase__=3, lowerCamelCase__=32, lowerCamelCase__=32) -> int:
        '''simple docstring'''
        __lowerCamelCase = self._generate_dummy_images(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_)
        __lowerCamelCase = dict(preprocessor(images=lowerCamelCase_, return_tensors=lowerCamelCase_))
        return inputs
```
code_codestyle: 355
Row 3, `style_context` column:

```python
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

__A = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

__A = logging.get_logger(__name__)


class __lowerCAmelCase(__magic_name__):
    """simple docstring"""

    snake_case_ = '''mask2former'''
    snake_case_ = ['''swin''']
    snake_case_ = {'''hidden_size''': '''hidden_dim'''}

    def __init__(self, lowerCamelCase__=None, lowerCamelCase__=256, lowerCamelCase__=256, lowerCamelCase__=256,
                 lowerCamelCase__=1_024, lowerCamelCase__="relu", lowerCamelCase__=6, lowerCamelCase__=10,
                 lowerCamelCase__=8, lowerCamelCase__=0.0, lowerCamelCase__=2_048, lowerCamelCase__=False,
                 lowerCamelCase__=False, lowerCamelCase__=4, lowerCamelCase__=255, lowerCamelCase__=100,
                 lowerCamelCase__=0.1, lowerCamelCase__=2.0, lowerCamelCase__=5.0, lowerCamelCase__=5.0,
                 lowerCamelCase__=12_544, lowerCamelCase__=3.0, lowerCamelCase__=0.75, lowerCamelCase__=0.02,
                 lowerCamelCase__=1.0, lowerCamelCase__=True, lowerCamelCase__=[4, 8, 16, 32],
                 lowerCamelCase__=None, **lowerCamelCase__) -> Tuple:
        '''simple docstring'''
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
            __lowerCamelCase = CONFIG_MAPPING['swin'](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=lowerCamelCase__,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )
        if isinstance(lowerCamelCase__, lowerCamelCase__):
            __lowerCamelCase = backbone_config.pop('model_type')
            __lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
            __lowerCamelCase = config_class.from_dict(lowerCamelCase__)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
                f"""Supported model types: {','.join(self.backbones_supported)}"""
            )
        __lowerCamelCase = backbone_config
        __lowerCamelCase = feature_size
        __lowerCamelCase = mask_feature_size
        __lowerCamelCase = hidden_dim
        __lowerCamelCase = encoder_feedforward_dim
        __lowerCamelCase = activation_function
        __lowerCamelCase = encoder_layers
        __lowerCamelCase = decoder_layers
        __lowerCamelCase = num_attention_heads
        __lowerCamelCase = dropout
        __lowerCamelCase = dim_feedforward
        __lowerCamelCase = pre_norm
        __lowerCamelCase = enforce_input_projection
        __lowerCamelCase = common_stride
        __lowerCamelCase = ignore_value
        __lowerCamelCase = num_queries
        __lowerCamelCase = no_object_weight
        __lowerCamelCase = class_weight
        __lowerCamelCase = mask_weight
        __lowerCamelCase = dice_weight
        __lowerCamelCase = train_num_points
        __lowerCamelCase = oversample_ratio
        __lowerCamelCase = importance_sample_ratio
        __lowerCamelCase = init_std
        __lowerCamelCase = init_xavier_std
        __lowerCamelCase = use_auxiliary_loss
        __lowerCamelCase = feature_strides
        __lowerCamelCase = output_auxiliary_logits
        __lowerCamelCase = decoder_layers
        super().__init__(**lowerCamelCase__)

    @classmethod
    def lowercase_(cls, lowerCamelCase__, **lowerCamelCase__) -> Any:
        '''simple docstring'''
        return cls(backbone_config=lowerCamelCase__, **lowerCamelCase__)

    def lowercase_(self) -> Dict[str, any]:
        '''simple docstring'''
        __lowerCamelCase = copy.deepcopy(self.__dict__)
        __lowerCamelCase = self.backbone_config.to_dict()
        __lowerCamelCase = self.__class__.model_type
        return output
```
style_context_codestyle: 348
label: 0
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version __A = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize __A = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' __A = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' __A = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): """simple docstring""" def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' import nltk nltk.download('wordnet' ) if NLTK_VERSION >= version.Version('3.6.5' ): nltk.download('punkt' ) if NLTK_VERSION >= version.Version('3.6.6' ): nltk.download('omw-1.4' ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0.9 , lowerCamelCase__=3 , lowerCamelCase__=0.5 ) -> Union[str, Any]: '''simple docstring''' if NLTK_VERSION >= version.Version('3.6.5' ): __lowerCamelCase = [ meteor_score.single_meteor_score( word_tokenize(lowercase_ ) , word_tokenize(lowercase_ ) , alpha=lowercase_ , beta=lowercase_ , gamma=lowercase_ ) for ref, pred in zip(lowercase_ , lowercase_ ) ] else: __lowerCamelCase = [ meteor_score.single_meteor_score(lowercase_ , lowercase_ , alpha=lowercase_ , beta=lowercase_ , gamma=lowercase_ ) for ref, pred in zip(lowercase_ , lowercase_ ) ] return {"meteor": np.mean(lowercase_ )}
code_codestyle: 356
Row 4, `style_context` column:

```python
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class __lowerCAmelCase(__magic_name__):
    """simple docstring"""

    snake_case_ = 42


class __lowerCAmelCase(__magic_name__, __magic_name__):
    """simple docstring"""

    @register_to_config
    def __init__(self, lowerCamelCase__=32, lowerCamelCase__=64, lowerCamelCase__=20, lowerCamelCase__=768,
                 lowerCamelCase__=77, lowerCamelCase__=4, lowerCamelCase__=0.0, lowerCamelCase__="silu",
                 lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__="linear", lowerCamelCase__="prd",
                 lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None) -> Tuple:
        '''simple docstring'''
        super().__init__()
        __lowerCamelCase = num_attention_heads
        __lowerCamelCase = attention_head_dim
        __lowerCamelCase = num_attention_heads * attention_head_dim
        __lowerCamelCase = additional_embeddings
        __lowerCamelCase = time_embed_dim or inner_dim
        __lowerCamelCase = embedding_proj_dim or embedding_dim
        __lowerCamelCase = clip_embed_dim or embedding_dim
        __lowerCamelCase = Timesteps(lowerCamelCase__, lowerCamelCase__, 0)
        __lowerCamelCase = TimestepEmbedding(lowerCamelCase__, lowerCamelCase__, out_dim=lowerCamelCase__, act_fn=lowerCamelCase__)
        __lowerCamelCase = nn.Linear(lowerCamelCase__, lowerCamelCase__)
        if embedding_proj_norm_type is None:
            __lowerCamelCase = None
        elif embedding_proj_norm_type == "layer":
            __lowerCamelCase = nn.LayerNorm(lowerCamelCase__)
        else:
            raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""")
        __lowerCamelCase = nn.Linear(lowerCamelCase__, lowerCamelCase__)
        if encoder_hid_proj_type is None:
            __lowerCamelCase = None
        elif encoder_hid_proj_type == "linear":
            __lowerCamelCase = nn.Linear(lowerCamelCase__, lowerCamelCase__)
        else:
            raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""")
        __lowerCamelCase = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, lowerCamelCase__))
        if added_emb_type == "prd":
            __lowerCamelCase = nn.Parameter(torch.zeros(1, 1, lowerCamelCase__))
        elif added_emb_type is None:
            __lowerCamelCase = None
        else:
            raise ValueError(
                f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."""
            )
        __lowerCamelCase = nn.ModuleList(
            [
                BasicTransformerBlock(
                    lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, dropout=lowerCamelCase__,
                    activation_fn='gelu', attention_bias=lowerCamelCase__,
                )
                for d in range(lowerCamelCase__)
            ]
        )
        if norm_in_type == "layer":
            __lowerCamelCase = nn.LayerNorm(lowerCamelCase__)
        elif norm_in_type is None:
            __lowerCamelCase = None
        else:
            raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""")
        __lowerCamelCase = nn.LayerNorm(lowerCamelCase__)
        __lowerCamelCase = nn.Linear(lowerCamelCase__, lowerCamelCase__)
        __lowerCamelCase = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_00_00.0
        )
        causal_attention_mask.triu_(1)
        __lowerCamelCase = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask', lowerCamelCase__, persistent=lowerCamelCase__)
        __lowerCamelCase = nn.Parameter(torch.zeros(1, lowerCamelCase__))
        __lowerCamelCase = nn.Parameter(torch.zeros(1, lowerCamelCase__))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def lowercase_(self) -> Dict[str, AttentionProcessor]:
        '''simple docstring'''
        __lowerCamelCase = {}

        def fn_recursive_add_processors(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__):
            if hasattr(lowerCamelCase__, 'set_processor'):
                __lowerCamelCase = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""", lowerCamelCase__, lowerCamelCase__)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__)
        return processors

    def lowercase_(self, lowerCamelCase__) -> Tuple:
        '''simple docstring'''
        __lowerCamelCase = len(self.attn_processors.keys())
        if isinstance(lowerCamelCase__, lowerCamelCase__) and len(lowerCamelCase__) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__)} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes."""
            )

        def fn_recursive_attn_processor(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__):
            if hasattr(lowerCamelCase__, 'set_processor'):
                if not isinstance(lowerCamelCase__, lowerCamelCase__):
                    module.set_processor(lowerCamelCase__)
                else:
                    module.set_processor(processor.pop(f"""{name}.processor"""))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""", lowerCamelCase__, lowerCamelCase__)

        for name, module in self.named_children():
            fn_recursive_attn_processor(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__)

    def lowercase_(self) -> Optional[int]:
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor())

    def lowercase_(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None,
                   lowerCamelCase__=None, lowerCamelCase__=True) -> int:
        '''simple docstring'''
        __lowerCamelCase = hidden_states.shape[0]
        __lowerCamelCase = timestep
        if not torch.is_tensor(lowerCamelCase__):
            __lowerCamelCase = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(lowerCamelCase__) and len(timesteps.shape) == 0:
            __lowerCamelCase = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        __lowerCamelCase = timesteps * torch.ones(lowerCamelCase__, dtype=timesteps.dtype, device=timesteps.device)
        __lowerCamelCase = self.time_proj(lowerCamelCase__)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        __lowerCamelCase = timesteps_projected.to(dtype=self.dtype)
        __lowerCamelCase = self.time_embedding(lowerCamelCase__)
        if self.embedding_proj_norm is not None:
            __lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__)
        __lowerCamelCase = self.embedding_proj(lowerCamelCase__)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            __lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set')
        __lowerCamelCase = self.proj_in(lowerCamelCase__)
        __lowerCamelCase = self.positional_embedding.to(hidden_states.dtype)
        __lowerCamelCase = []
        __lowerCamelCase = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(lowerCamelCase__)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            __lowerCamelCase = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            __lowerCamelCase = hidden_states[:, None, :]
        __lowerCamelCase = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            __lowerCamelCase = self.prd_embedding.to(hidden_states.dtype).expand(lowerCamelCase__, -1, -1)
            additional_embeds.append(lowerCamelCase__)
        __lowerCamelCase = torch.cat(lowerCamelCase__, dim=1)
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        __lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            __lowerCamelCase = F.pad(
                lowerCamelCase__,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )
        __lowerCamelCase = hidden_states + positional_embeddings
        if attention_mask is not None:
            __lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
            __lowerCamelCase = F.pad(lowerCamelCase__, (0, self.additional_embeddings), value=0.0)
            __lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            __lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
        if self.norm_in is not None:
            __lowerCamelCase = self.norm_in(lowerCamelCase__)
        for block in self.transformer_blocks:
            __lowerCamelCase = block(lowerCamelCase__, attention_mask=lowerCamelCase__)
        __lowerCamelCase = self.norm_out(lowerCamelCase__)
        if self.prd_embedding is not None:
            __lowerCamelCase = hidden_states[:, -1]
        else:
            __lowerCamelCase = hidden_states[:, additional_embeddings_len:]
        __lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__)

    def lowercase_(self, lowerCamelCase__) -> List[Any]:
        '''simple docstring'''
        __lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
```
style_context_codestyle: 348
label: 0
Row 5, `code` column (this sample is cut off mid-statement in the source, and the remaining columns of the row are missing):

```python
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACTaFN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig

__A = logging.get_logger(__name__)

# General docstring
__A = '''ResNetConfig'''

# Base docstring
__A = '''microsoft/resnet-50'''
__A = [1, 20_48, 7, 7]

# Image classification docstring
__A = '''microsoft/resnet-50'''
__A = '''tiger cat'''

__A = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class __lowerCAmelCase(nn.Module):
    """simple docstring"""

    def __init__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=3, lowerCamelCase__=1,
                 lowerCamelCase__="relu") -> Dict:
        '''simple docstring'''
        super().__init__()
        __lowerCamelCase = nn.Convad(
            lowerCamelCase__, lowerCamelCase__, kernel_size=lowerCamelCase__, stride=lowerCamelCase__,
            padding=kernel_size // 2, bias=lowerCamelCase__
        )
        __lowerCamelCase = nn.BatchNormad(lowerCamelCase__)
        __lowerCamelCase = ACTaFN[activation] if activation is not None else nn.Identity()

    def lowercase_(self, lowerCamelCase__) -> List[str]:
        '''simple docstring'''
        __lowerCamelCase = self.convolution(lowerCamelCase__)
        __lowerCamelCase = self.normalization(lowerCamelCase__)
        __lowerCamelCase = self.activation(lowerCamelCase__)
        return hidden_state


class __lowerCAmelCase(nn.Module):
    """simple docstring"""

    def __init__(self, lowerCamelCase__) -> Optional[int]:
        '''simple docstring'''
        super().__init__()
        __lowerCamelCase = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        __lowerCamelCase = nn.MaxPoolad(kernel_size=3, stride=2, padding=1)
        __lowerCamelCase = config.num_channels

    def lowercase_(self, lowerCamelCase__) -> int:
        '''simple docstring'''
        __lowerCamelCase = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        __lowerCamelCase = self.embedder(lowerCamelCase__)
        __lowerCamelCase = self.pooler(lowerCamelCase__)
        return embedding


class __lowerCAmelCase(nn.Module):
    """simple docstring"""

    def __init__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=2) -> Any:
        '''simple docstring'''
        super().__init__()
        __lowerCamelCase = nn.Convad(lowerCamelCase__, lowerCamelCase__, kernel_size=1, stride=lowerCamelCase__, bias=lowerCamelCase__)
        __lowerCamelCase = nn.BatchNormad(lowerCamelCase__)

    def lowercase_(self, lowerCamelCase__) -> List[Any]:
        '''simple docstring'''
        __lowerCamelCase = self.convolution(lowerCamelCase__)
        __lowerCamelCase = self.normalization(lowerCamelCase__)
        return hidden_state


class __lowerCAmelCase(nn.Module):
    """simple docstring"""

    def __init__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=1, lowerCamelCase__="relu") -> Optional[Any]:
        '''simple docstring'''
        super().__init__()
        __lowerCamelCase = in_channels != out_channels or stride != 1
        __lowerCamelCase = (
            ResNetShortCut(lowerCamelCase__, lowerCamelCase__, stride=lowerCamelCase__) if should_apply_shortcut else nn.Identity()
        )
        __lowerCamelCase = nn.Sequential(
            ResNetConvLayer(lowerCamelCase__, lowerCamelCase__, stride=lowerCamelCase__),
            ResNetConvLayer(lowerCamelCase__, lowerCamelCase__, activation=lowerCamelCase__),
        )
        __lowerCamelCase = ACTaFN[activation]

    def lowercase_(self, lowerCamelCase__) -> List[str]:
        '''simple docstring'''
        __lowerCamelCase = hidden_state
        __lowerCamelCase = self.layer(lowerCamelCase__)
        __lowerCamelCase = self.shortcut(lowerCamelCase__)
        hidden_state += residual
        __lowerCamelCase = self.activation(lowerCamelCase__)
        return hidden_state


class __lowerCAmelCase(nn.Module):
    """simple docstring"""

    def __init__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=1, lowerCamelCase__="relu",
                 lowerCamelCase__=4) -> List[Any]:
        '''simple docstring'''
        super().__init__()
        __lowerCamelCase = in_channels != out_channels or stride != 1
        __lowerCamelCase = out_channels // reduction
        __lowerCamelCase = (
            ResNetShortCut(lowerCamelCase__, lowerCamelCase__, stride=lowerCamelCase__) if should_apply_shortcut else nn.Identity()
        )
        __lowerCamelCase = nn.Sequential(
            ResNetConvLayer(lowerCamelCase__, lowerCamelCase__, kernel_size=1),
            ResNetConvLayer(lowerCamelCase__, lowerCamelCase__, stride=lowerCamelCase__),
            ResNetConvLayer(lowerCamelCase__, lowerCamelCase__, kernel_size=1, activation=lowerCamelCase__),
        )
        __lowerCamelCase = ACTaFN[activation]

    def lowercase_(self, lowerCamelCase__) -> Dict:
        '''simple docstring'''
        __lowerCamelCase = hidden_state
        __lowerCamelCase = self.layer(lowerCamelCase__)
        __lowerCamelCase = self.shortcut(lowerCamelCase__)
        hidden_state += residual
        __lowerCamelCase = self.activation(lowerCamelCase__)
        return hidden_state


class __lowerCAmelCase(nn.Module):
    """simple docstring"""

    def __init__(self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=2,
                 lowerCamelCase__=2) -> Optional[int]:
        '''simple docstring'''
        super().__init__()
        __lowerCamelCase = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        __lowerCamelCase = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(lowerCamelCase__, lowerCamelCase__, stride=lowerCamelCase__, activation=config.hidden_act),
            *[layer(lowerCamelCase__, lowerCamelCase__, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def lowercase_(self, lowerCamelCase__) -> List[str]:
        '''simple docstring'''
        __lowerCamelCase = input
        for layer in self.layers:
            __lowerCamelCase = layer(lowerCamelCase__)
        return hidden_state


class __lowerCAmelCase(nn.Module):
    """simple docstring"""

    def __init__(self, lowerCamelCase__) -> List[str]:
        '''simple docstring'''
        super().__init__()
        __lowerCamelCase = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                lowerCamelCase__,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        __lowerCamelCase = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(lowerCamelCase__, config.depths[1:]):
            self.stages.append(ResNetStage(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, depth=lowerCamelCase__))

    def lowercase_(self, lowerCamelCase__, lowerCamelCase__=False, lowerCamelCase__=True) -> Optional[Any]:
        '''simple docstring'''
        __lowerCamelCase = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                __lowerCamelCase = hidden_states + (hidden_state,)
            __lowerCamelCase = stage_module(lowerCamelCase__)
        if output_hidden_states:
            __lowerCamelCase = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=lowerCamelCase__,
            hidden_states=lowerCamelCase__,
        )


class __lowerCAmelCase(A__):
    """simple docstring"""

    snake_case_ = ResNetConfig
    snake_case_ = 'resnet'
    snake_case_ = 'pixel_values'
    snake_case_ = True

    def lowercase_(self, lowerCamelCase__) -> Dict:
        '''simple docstring'''
        if isinstance(lowerCamelCase__, nn.Convad):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(lowerCamelCase__, (nn.BatchNormad, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def lowercase_(self, lowerCamelCase__, lowerCamelCase__=False) -> Optional[int]:
        '''simple docstring'''
        if isinstance(lowerCamelCase__, lowerCamelCase__):
            __lowerCamelCase = value


__A = R'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

__A = R'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''


@add_start_docstrings(
    '''The bare ResNet model outputting raw features without any specific head on top.''',
    A__,
)
class __lowerCAmelCase(A__):
    """simple docstring"""

    def __init__(self, lowerCamelCase__) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(lowerCamelCase__)
        __lowerCamelCase = config
        __lowerCamelCase = ResNetEmbeddings(lowerCamelCase__)
        __lowerCamelCase = ResNetEncoder(lowerCamelCase__)
        __lowerCamelCase = nn.AdaptiveAvgPoolad((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowerCamelCase__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=lowerCamelCase__,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def lowercase_(self, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=None) -> int:
        '''simple docstring'''
        __lowerCamelCase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
        __lowerCamelCase = self.embedder(lowerCamelCase__)
        __lowerCamelCase = self.encoder(lowerCamelCase__, output_hidden_states=lowerCamelCase__, return_dict=lowerCamelCase__)
        __lowerCamelCase = encoder_outputs[0]
        __lowerCamelCase = self.pooler(lowerCamelCase__)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=lowerCamelCase__,
            pooler_output=lowerCamelCase__,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    '''\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ''',
    A__,
)
class __lowerCAmelCase(A__):
    """simple docstring"""

    def __init__(self, lowerCamelCase__) -> List[Any]:
        '''simple docstring'''
        super().__init__(lowerCamelCase__)
        __lowerCamelCase = config.num_labels
        __lowerCamelCase = ResNetModel(lowerCamelCase__)
        # classification head
        __lowerCamelCase = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowerCamelCase__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=lowerCamelCase__,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def lowercase_(self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None,
                   lowerCamelCase__=None) -> str:
        '''simple docstring'''
        __lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
        __lowerCamelCase = self.resnet(lowerCamelCase__, output_hidden_states=lowerCamelCase__, return_dict=lowerCamelCase__)
        __lowerCamelCase = outputs.pooler_output if return_dict else outputs[1]
        __lowerCamelCase = self.classifier(lowerCamelCase__)
        __lowerCamelCase = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    __lowerCamelCase = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    __lowerCamelCase = 'single_label_classification'
                else:
                    __lowerCamelCase = 'multi_label_classification'
            if self.config.problem_type == "regression":
                __lowerCamelCase = MSELoss()
                if self.num_labels == 1:
                    __lowerCamelCase = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    __lowerCamelCase =
```
loss_fct(lowerCamelCase__ , lowerCamelCase__ ) elif self.config.problem_type == "single_label_classification": __lowerCamelCase = CrossEntropyLoss() __lowerCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowerCamelCase = BCEWithLogitsLoss() __lowerCamelCase = loss_fct(lowerCamelCase__ , lowerCamelCase__ ) if not return_dict: __lowerCamelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase__ , logits=lowerCamelCase__ , hidden_states=outputs.hidden_states ) @add_start_docstrings( '''\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ''' , A__ , ) class __lowerCAmelCase ( A__ , A__ ): """simple docstring""" def __init__( self , lowerCamelCase__ ) -> int: '''simple docstring''' super().__init__(lowerCamelCase__ ) super()._init_backbone(lowerCamelCase__ ) __lowerCamelCase = [config.embedding_size] + config.hidden_sizes __lowerCamelCase = ResNetEmbeddings(lowerCamelCase__ ) __lowerCamelCase = ResNetEncoder(lowerCamelCase__ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCamelCase__ ) @replace_return_docstrings(output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None ) -> Dict: '''simple docstring''' __lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase = self.embedder(lowerCamelCase__ ) __lowerCamelCase = self.encoder(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: __lowerCamelCase = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCamelCase__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCamelCase__ , )
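# --- Usage sketch (illustrative; assumes the public transformers entry points
# `AutoImageProcessor` / `ResNetForImageClassification` and the public
# `microsoft/resnet-50` checkpoint, none of which appear in this file) ---
# Runs the image-classification head defined above end to end on one image.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ResNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, num_labels)
print(model.config.id2label[logits.argmax(-1).item()])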
import sys from collections import defaultdict class __lowerCAmelCase : """simple docstring""" def __init__( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = [] def lowercase_ ( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' return self.node_position[vertex] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = pos def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCamelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCamelCase = 2 * start + 1 else: __lowerCamelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child] __lowerCamelCase , __lowerCamelCase = ( heap[start], positions[start], ) __lowerCamelCase , __lowerCamelCase = temp, tempa __lowerCamelCase = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , lowerCamelCase__ ) self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = position[index] while index != 0: __lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCamelCase = heap[parent] __lowerCamelCase = position[parent] self.set_position(position[parent] , lowerCamelCase__ ) else: __lowerCamelCase = val __lowerCamelCase = temp self.set_position(lowerCamelCase__ , lowerCamelCase__ ) break __lowerCamelCase = parent else: __lowerCamelCase = val __lowerCamelCase = temp self.set_position(lowerCamelCase__ , 0 ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str: '''simple docstring''' __lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1 for i in range(lowerCamelCase__ , -1 , -1 ): self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = positions[0] __lowerCamelCase = sys.maxsize self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ ) return temp def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowerCamelCase = Heap() __lowerCamelCase = [0] * len(UpperCamelCase__ ) __lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex __lowerCamelCase = [] for vertex in range(len(UpperCamelCase__ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCamelCase__ ) heap.node_position.append(UpperCamelCase__ ) __lowerCamelCase = [] __lowerCamelCase = 1 __lowerCamelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCamelCase = 0 __lowerCamelCase = distance heap.heapify(UpperCamelCase__ , UpperCamelCase__ ) for _ in range(1 , len(UpperCamelCase__ ) ): __lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ ) if visited[vertex] == 0: 
tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCamelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCamelCase__ )] ): __lowerCamelCase = distance heap.bottom_to_top( UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > __A = int(input("Enter number of edges: ").strip()) __A = defaultdict(list) for _ in range(edges_number): __A = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
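# --- Usage sketch (illustrative) ---
# Builds the same defaultdict-of-lists adjacency structure the interactive
# __main__ block above expects and runs the minimum-spanning-tree routine
# directly, without stdin. The call mirrors the `prisms_algorithm(adjacency_list)`
# usage in the __main__ block; the edge weights below are arbitrary examples.
from collections import defaultdict

example_graph = defaultdict(list)
for u, v, w in [(0, 1, 4), (0, 2, 1), (1, 2, 2), (1, 3, 5), (2, 3, 8)]:
    example_graph[u].append([v, w])
    example_graph[v].append([u, w])

# Should print the MST edges as (parent, child) pairs,
# e.g. [(0, 2), (2, 1), (1, 3)] for the weights above.
print(prisms_algorithm(example_graph))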
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 __A = get_tests_dir("fixtures/dummy_feature_extractor_config.json") __A = get_tests_dir("fixtures/vocab.json") __A = get_tests_dir("fixtures") class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = 0 def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' ) self.assertIsInstance(lowercase_ , lowercase_ ) def lowercase_ ( self ) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase = WavaVecaConfig() __lowerCamelCase = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' ) # save in new folder model_config.save_pretrained(lowercase_ ) processor.save_pretrained(lowercase_ ) __lowerCamelCase = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) def lowercase_ ( self ) -> str: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(lowercase_ , os.path.join(lowercase_ , lowercase_ ) ) copyfile(lowercase_ , os.path.join(lowercase_ , 'vocab.json' ) ) __lowerCamelCase = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase = WavaVecaFeatureExtractor() __lowerCamelCase = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' ) __lowerCamelCase = WavaVecaProcessor(lowercase_ , lowercase_ ) # save in new folder processor.save_pretrained(lowercase_ ) # drop `processor_class` in tokenizer with open(os.path.join(lowercase_ , lowercase_ ) , 'r' ) as f: __lowerCamelCase = json.load(lowercase_ ) config_dict.pop('processor_class' ) with open(os.path.join(lowercase_ , lowercase_ ) , 'w' ) as f: f.write(json.dumps(lowercase_ ) ) __lowerCamelCase = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase = WavaVecaFeatureExtractor() __lowerCamelCase = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' ) __lowerCamelCase = WavaVecaProcessor(lowercase_ , lowercase_ ) # save in 
new folder processor.save_pretrained(lowercase_ ) # drop `processor_class` in feature extractor with open(os.path.join(lowercase_ , lowercase_ ) , 'r' ) as f: __lowerCamelCase = json.load(lowercase_ ) config_dict.pop('processor_class' ) with open(os.path.join(lowercase_ , lowercase_ ) , 'w' ) as f: f.write(json.dumps(lowercase_ ) ) __lowerCamelCase = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase = WavaVecaConfig(processor_class='Wav2Vec2Processor' ) model_config.save_pretrained(lowercase_ ) # copy relevant files copyfile(lowercase_ , os.path.join(lowercase_ , 'vocab.json' ) ) # create emtpy sample processor with open(os.path.join(lowercase_ , lowercase_ ) , 'w' ) as f: f.write('{}' ) __lowerCamelCase = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) def lowercase_ ( self ) -> int: '''simple docstring''' # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowercase_ ): __lowerCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowercase_ ): __lowerCamelCase = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ ) __lowerCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , 'NewProcessor' ) __lowerCamelCase = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) __lowerCamelCase = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) # Test we can also load the slow version __lowerCamelCase = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ , use_fast=lowercase_ ) __lowerCamelCase = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' ) else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' try: AutoConfig.register('custom' , lowercase_ ) AutoFeatureExtractor.register(lowercase_ , lowercase_ ) AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ ) AutoProcessor.register(lowercase_ , lowercase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase_ ): AutoProcessor.register(lowercase_ , lowercase_ ) # Now that the config is registered, it can be used as any other config with the auto-API __lowerCamelCase = CustomFeatureExtractor.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: __lowerCamelCase = os.path.join(lowercase_ , 'vocab.txt' ) with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) __lowerCamelCase = CustomTokenizer(lowercase_ ) __lowerCamelCase = CustomProcessor(lowercase_ , lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: 
processor.save_pretrained(lowercase_ ) __lowerCamelCase = AutoProcessor.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' class __lowerCAmelCase ( a_ ): """simple docstring""" snake_case_ = False class __lowerCAmelCase ( a_ ): """simple docstring""" snake_case_ = False class __lowerCAmelCase ( a_ ): """simple docstring""" snake_case_ = '''AutoFeatureExtractor''' snake_case_ = '''AutoTokenizer''' snake_case_ = False try: AutoConfig.register('custom' , lowercase_ ) AutoFeatureExtractor.register(lowercase_ , lowercase_ ) AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ ) AutoProcessor.register(lowercase_ , lowercase_ ) # If remote code is not set, the default is to use local classes. __lowerCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' ) self.assertEqual(processor.__class__.__name__ , 'NewProcessor' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. __lowerCamelCase = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ ) self.assertEqual(processor.__class__.__name__ , 'NewProcessor' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
__lowerCamelCase = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ ) self.assertEqual(processor.__class__.__name__ , 'NewProcessor' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' ) self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' ) @is_staging_test class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def lowercase_ ( cls ) -> Any: '''simple docstring''' __lowerCamelCase = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def lowercase_ ( cls ) -> Any: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='test-processor' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-processor' ) except HTTPError: pass def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = WavaVecaProcessor.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(lowercase_ , 'test-processor' ) , push_to_hub=lowercase_ , use_auth_token=self._token ) __lowerCamelCase = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = WavaVecaProcessor.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(lowercase_ , 'test-processor-org' ) , push_to_hub=lowercase_ , use_auth_token=self._token , organization='valid_org' , ) __lowerCamelCase = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def lowercase_ ( self ) -> Dict: '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() __lowerCamelCase = CustomFeatureExtractor.from_pretrained(lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: __lowerCamelCase = 
os.path.join(lowercase_ , 'vocab.txt' ) with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) __lowerCamelCase = CustomTokenizer(lowercase_ ) __lowerCamelCase = CustomProcessor(lowercase_ , lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token ) __lowerCamelCase = Repository(lowercase_ , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token ) processor.save_pretrained(lowercase_ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { 'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor', 'AutoProcessor': 'custom_processing.CustomProcessor', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(lowercase_ , 'tokenizer_config.json' ) ) as f: __lowerCamelCase = json.load(lowercase_ ) self.assertDictEqual( tokenizer_config['auto_map'] , { 'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None], 'AutoProcessor': 'custom_processing.CustomProcessor', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(lowercase_ , 'custom_feature_extraction.py' ) ) ) self.assertTrue(os.path.isfile(os.path.join(lowercase_ , 'custom_tokenization.py' ) ) ) self.assertTrue(os.path.isfile(os.path.join(lowercase_ , 'custom_processing.py' ) ) ) repo.push_to_hub() __lowerCamelCase = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=lowercase_ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
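# --- Illustrative sketch of the registration pattern the tests above exercise ---
# Outside a test harness, wiring a custom processor into the Auto* factories
# uses the same four calls; the Custom* names are stand-ins for your own
# subclasses of PretrainedConfig, FeatureExtractionMixin, PreTrainedTokenizer
# and ProcessorMixin, so the snippet is kept commented rather than runnable.
#
#   from transformers import AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
#
#   # After registration, AutoProcessor.from_pretrained(...) resolves any saved
#   # directory whose config has model_type == "custom" to CustomProcessor.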
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=__magic_name__ ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) snake_case_ = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} ) snake_case_ = Features( { '''answers''': Sequence( { '''text''': Value('''string''' ), '''answer_start''': Value('''int32''' ), } ) } ) snake_case_ = "question" snake_case_ = "context" snake_case_ = "answers" @property def lowercase_ ( self ) -> Dict[str, str]: '''simple docstring''' return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
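# --- Usage sketch (illustrative; the class above is `QuestionAnsweringExtractive`
# in the upstream `datasets` library, and `prepare_for_task` belongs to the
# task-template API available in the datasets releases this file comes from) ---
from datasets import load_dataset
from datasets.tasks import QuestionAnsweringExtractive

template = QuestionAnsweringExtractive(
    question_column="question", context_column="context", answers_column="answers"
)
print(template.column_mapping)
# {'question': 'question', 'context': 'context', 'answers': 'answers'}

squad = load_dataset("squad", split="validation")
squad = squad.prepare_for_task(template)  # casts/renames columns to the schema above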
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch __A = logging.get_logger(__name__) @add_end_docstrings( __magic_name__ , r'''\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ''' , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def lowercase_ ( self , lowerCamelCase__ ) -> np.ndarray: '''simple docstring''' if self.framework == "tf": __lowerCamelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": __lowerCamelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__A ) else: raise ValueError('Unsupported framework' ) return masked_index def lowercase_ ( self , lowerCamelCase__ ) -> np.ndarray: '''simple docstring''' __lowerCamelCase = self.get_masked_index(__A ) __lowerCamelCase = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' if isinstance(__A , __A ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['input_ids'][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__A ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ) -> Dict[str, GenericTensor]: '''simple docstring''' if return_tensors is None: __lowerCamelCase = self.framework __lowerCamelCase = self.tokenizer(__A , return_tensors=__A ) self.ensure_exactly_one_mask_token(__A ) return model_inputs def lowercase_ ( self , lowerCamelCase__ ) -> str: '''simple docstring''' __lowerCamelCase = self.model(**__A ) __lowerCamelCase = model_inputs['input_ids'] return model_outputs def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=None ) -> Any: '''simple docstring''' # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: __lowerCamelCase = target_ids.shape[0] __lowerCamelCase = model_outputs['input_ids'][0] __lowerCamelCase = model_outputs['logits'] if self.framework == "tf": __lowerCamelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] __lowerCamelCase = outputs.numpy() __lowerCamelCase = outputs[0, masked_index, :] __lowerCamelCase = stable_softmax(__A , axis=-1 ) if target_ids is not None: __lowerCamelCase = tf.gather_nd(tf.squeeze(__A , 0 ) , target_ids.reshape(-1 , 1 ) ) __lowerCamelCase = tf.expand_dims(__A , 0 ) __lowerCamelCase = tf.math.top_k(__A , k=__A ) __lowerCamelCase , __lowerCamelCase = topk.values.numpy(), topk.indices.numpy() else: __lowerCamelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__A ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample __lowerCamelCase = outputs[0, masked_index, :] __lowerCamelCase = logits.softmax(dim=-1 ) if target_ids is not None: 
__lowerCamelCase = probs[..., target_ids] __lowerCamelCase , __lowerCamelCase = probs.topk(__A ) __lowerCamelCase = [] __lowerCamelCase = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): __lowerCamelCase = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place __lowerCamelCase = input_ids.numpy().copy() if target_ids is not None: __lowerCamelCase = target_ids[p].tolist() __lowerCamelCase = p # Filter padding out: __lowerCamelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back __lowerCamelCase = self.tokenizer.decode(__A , skip_special_tokens=__A ) __lowerCamelCase = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence} row.append(__A ) result.append(__A ) if single_mask: return result[0] return result def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> Optional[int]: '''simple docstring''' if isinstance(__A , __A ): __lowerCamelCase = [targets] try: __lowerCamelCase = self.tokenizer.get_vocab() except Exception: __lowerCamelCase = {} __lowerCamelCase = [] for target in targets: __lowerCamelCase = vocab.get(__A , __A ) if id_ is None: __lowerCamelCase = self.tokenizer( __A , add_special_tokens=__A , return_attention_mask=__A , return_token_type_ids=__A , max_length=1 , truncation=__A , )['input_ids'] if len(__A ) == 0: logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ 'We cannot replace it with anything meaningful, ignoring it' ) continue __lowerCamelCase = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) __lowerCamelCase = list(set(__A ) ) if len(__A ) == 0: raise ValueError('At least one target must be provided when passed.' ) __lowerCamelCase = np.array(__A ) return target_ids def lowercase_ ( self , lowerCamelCase__=None , lowerCamelCase__=None ) -> List[Any]: '''simple docstring''' __lowerCamelCase = {} if targets is not None: __lowerCamelCase = self.get_target_ids(__A , __A ) __lowerCamelCase = target_ids if top_k is not None: __lowerCamelCase = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' ) return {}, {}, postprocess_params def __call__( self , lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = super().__call__(__A , **__A ) if isinstance(__A , __A ) and len(__A ) == 1: return outputs[0] return outputs
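# --- Usage sketch (illustrative; drives the pipeline implemented above through
# the public `pipeline` factory; "bert-base-uncased" is just a public example
# checkpoint) ---
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")
for pred in fill_mask("Paris is the [MASK] of France.", top_k=3):
    print(f"{pred['score']:.4f}  {pred['token_str']}")

# Restrict scoring to a fixed candidate set; this exercises the target-id
# lookup and vocabulary fallback handled above:
print(fill_mask("Paris is the [MASK] of France.", targets=["capital", "city"]))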
import requests __A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=" def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None: """simple docstring""" __lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page['articles'] , 1 ): print(F"""{i}.) {article['title']}""" ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
__A = { "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __A = {value: key for key, value in encode_dict.items()} def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = """""" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Dict: """simple docstring""" if set(UpperCamelCase__ ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) __lowerCamelCase = """""" for word in coded.split(): while len(UpperCamelCase__ ) != 0: decoded += decode_dict[word[:5]] __lowerCamelCase = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
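# --- Round-trip sketch (illustrative; the two helpers are named encode/decode
# in the original source, as their error messages indicate, though both appear
# here under the renamed definition) ---
#
#   coded = encode("hello world")
#   # 'AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB'
#   assert decode(coded) == "hello world"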
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __A = logging.get_logger(__name__) __A = TypeVar("DatasetType", Dataset, IterableDataset) def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[List[float]] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType: """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase__ ): if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" ) if i == 0: __lowerCamelCase , __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ ) else: return _interleave_iterable_datasets( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ ) def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : int = 0 , ) -> DatasetType: """simple docstring""" if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase__ ): if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' 
) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" ) if i == 0: __lowerCamelCase , __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ ) else: return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
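# --- Usage sketch (illustrative; the two helpers above are exposed as
# `interleave_datasets` and `concatenate_datasets` in the public datasets API) ---
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12]})

# Sample rows alternately according to the given probabilities.
mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
# Stack rows (axis=0, the default) or columns (axis=1, same number of rows).
joined = concatenate_datasets([d1, d2])  # rows: 0, 1, 2, 10, 11, 12
wide = concatenate_datasets([d1, d2.rename_column("x", "y")], axis=1)
print(joined["x"], wide.column_names)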
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { "configuration_trajectory_transformer": [ "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrajectoryTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TrajectoryTransformerModel", "TrajectoryTransformerPreTrainedModel", "load_tf_weights_in_trajectory_transformer", ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = ["model.decoder.embed_positions.weights"] def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]: """simple docstring""" if "emb" in name: __lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' ) if "transformer" in name: __lowerCamelCase = name.replace('transformer' , 'model.decoder' ) if "cross_attention" in name: __lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' ) if "linear1" in name: __lowerCamelCase = name.replace('linear1' , 'fc1' ) if "linear2" in name: __lowerCamelCase = name.replace('linear2' , 'fc2' ) if "norm1" in name: __lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' ) if "norm_cross" in name: __lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' ) if "norm2" in name: __lowerCamelCase = name.replace('norm2' , 'final_layer_norm' ) if "out_norm" in name: __lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' ) if "linears" in name: __lowerCamelCase = name.replace('linears' , 'lm_heads' ) if "condition_provider.conditioners.description.output_proj" in name: __lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' ) return name def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]: """simple docstring""" __lowerCamelCase = list(state_dict.keys() ) __lowerCamelCase = {} for key in keys: __lowerCamelCase = state_dict.pop(UpperCamelCase__ ) __lowerCamelCase = rename_keys(UpperCamelCase__ ) if "in_proj_weight" in key: # split fused qkv proj __lowerCamelCase = val[:hidden_size, :] __lowerCamelCase = val[hidden_size : 2 * hidden_size, :] __lowerCamelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __lowerCamelCase = val else: __lowerCamelCase = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig: """simple docstring""" if checkpoint == "small": # default config values __lowerCamelCase = 1024 __lowerCamelCase = 24 __lowerCamelCase = 16 elif checkpoint == "medium": __lowerCamelCase = 1536 __lowerCamelCase = 48 __lowerCamelCase = 24 elif checkpoint == "large": __lowerCamelCase = 2048 __lowerCamelCase = 48 __lowerCamelCase = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) __lowerCamelCase = MusicgenDecoderConfig( hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , ) return config @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]: """simple docstring""" __lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ ) __lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ ) __lowerCamelCase = fairseq_model.lm.state_dict() __lowerCamelCase , __lowerCamelCase = rename_state_dict( UpperCamelCase__ , 
hidden_size=decoder_config.hidden_size ) __lowerCamelCase = TaEncoderModel.from_pretrained('t5-base' ) __lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' ) __lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) for key in missing_keys.copy(): if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(UpperCamelCase__ ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model __lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ ) # check we can do a forward pass __lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __lowerCamelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits if logits.shape != (8, 1, 2048): raise ValueError('Incorrect shape for logits' ) # now construct the processor __lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' ) __lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) # set the appropriate bos/pad token ids __lowerCamelCase = 2048 __lowerCamelCase = 2048 # set other default generation config params __lowerCamelCase = int(30 * audio_encoder.config.frame_rate ) __lowerCamelCase = True __lowerCamelCase = 3.0 if pytorch_dump_folder is not None: Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(UpperCamelCase__ ) processor.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) __A = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
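# --- Example invocation (illustrative; the script filename is a placeholder,
# the flags match the argparse definitions above) ---
#
#   python convert_musicgen_checkpoint.py \
#       --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small \
#       --device cpu
#
# This downloads the fairseq "small" checkpoint, remaps its state dict with the
# renaming helpers above, sanity-checks one forward pass, and writes the
# composite model and processor to ./musicgen-small.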
from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json", } class __lowerCAmelCase ( lowerCamelCase__ ): """simple docstring""" snake_case_ = """transfo-xl""" snake_case_ = ["""mems"""] snake_case_ = { """n_token""": """vocab_size""", """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowerCamelCase__=267_735 , lowerCamelCase__=[20_000, 40_000, 200_000] , lowerCamelCase__=1_024 , lowerCamelCase__=1_024 , lowerCamelCase__=16 , lowerCamelCase__=64 , lowerCamelCase__=4_096 , lowerCamelCase__=4 , lowerCamelCase__=False , lowerCamelCase__=18 , lowerCamelCase__=1_600 , lowerCamelCase__=1_000 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=0 , lowerCamelCase__=-1 , lowerCamelCase__=True , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=True , lowerCamelCase__="normal" , lowerCamelCase__=0.01 , lowerCamelCase__=0.01 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-5 , lowerCamelCase__=0 , **lowerCamelCase__ , ) -> Dict: '''simple docstring''' __lowerCamelCase = vocab_size __lowerCamelCase = [] self.cutoffs.extend(snake_case__ ) if proj_share_all_but_first: __lowerCamelCase = [False] + [True] * len(self.cutoffs ) else: __lowerCamelCase = [False] + [False] * len(self.cutoffs ) __lowerCamelCase = d_model __lowerCamelCase = d_embed __lowerCamelCase = d_head __lowerCamelCase = d_inner __lowerCamelCase = div_val __lowerCamelCase = pre_lnorm __lowerCamelCase = n_layer __lowerCamelCase = n_head __lowerCamelCase = mem_len __lowerCamelCase = same_length __lowerCamelCase = attn_type __lowerCamelCase = clamp_len __lowerCamelCase = sample_softmax __lowerCamelCase = adaptive __lowerCamelCase = dropout __lowerCamelCase = dropatt __lowerCamelCase = untie_r __lowerCamelCase = init __lowerCamelCase = init_range __lowerCamelCase = proj_init_std __lowerCamelCase = init_std __lowerCamelCase = layer_norm_epsilon super().__init__(eos_token_id=snake_case__ , **snake_case__ ) @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
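# --- Usage sketch (illustrative; the class above mirrors `TransfoXLConfig`
# from upstream transformers) ---
from transformers import TransfoXLConfig, TransfoXLModel

# A smaller-than-default variant, overriding a few keyword arguments:
config = TransfoXLConfig(d_model=512, d_embed=512, n_layer=6, n_head=8, mem_len=256)
model = TransfoXLModel(config)
print(config.cutoffs)  # adaptive-softmax cutoffs, [20000, 40000, 200000] by default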
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''sew-d''' def __init__( self , lowerCamelCase__=32 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__=2 , lowerCamelCase__=512 , lowerCamelCase__=256 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=128 , lowerCamelCase__=16 , lowerCamelCase__=True , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ) -> Any: '''simple docstring''' super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ ) __lowerCamelCase = hidden_size __lowerCamelCase = feat_extract_norm __lowerCamelCase = feat_extract_activation __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = conv_bias __lowerCamelCase = num_conv_pos_embeddings __lowerCamelCase = num_conv_pos_embedding_groups __lowerCamelCase = len(self.conv_dim ) __lowerCamelCase = num_hidden_layers __lowerCamelCase = intermediate_size __lowerCamelCase = squeeze_factor __lowerCamelCase = max_position_embeddings __lowerCamelCase = position_buckets __lowerCamelCase = share_att_key __lowerCamelCase = relative_attention __lowerCamelCase = norm_rel_ebd __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = hidden_act __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = feat_proj_dropout __lowerCamelCase = final_dropout __lowerCamelCase = layer_norm_eps __lowerCamelCase = feature_layer_norm_eps __lowerCamelCase = initializer_range __lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks # ctc loss __lowerCamelCase = ctc_loss_reduction __lowerCamelCase = ctc_zero_infinity # sequence classification __lowerCamelCase = use_weighted_layer_sum __lowerCamelCase = classifier_proj_size @property def lowercase_ ( self ) -> Any: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
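# --- Usage sketch (illustrative) ---
# The final property above multiplies the convolutional strides, i.e. the total
# downsampling factor of the feature extractor; upstream transformers exposes it
# as `inputs_to_logits_ratio` on `SEWDConfig`:
#
#   from transformers import SEWDConfig
#
#   config = SEWDConfig()
#   print(config.inputs_to_logits_ratio)  # 5*2*1*2*1*2*1*2*1*2*1*2*1 = 320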
import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor __A = logging.getLogger(__name__) __A = 50 # max width of layer names __A = 70 # max width of quantizer names def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> Tuple: """simple docstring""" __lowerCamelCase = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_lowercase , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_lowercase , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_lowercase , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_lowercase , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_lowercase , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_lowercase , type=_lowercase , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_lowercase , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> str: """simple docstring""" if args.calibrator == "max": __lowerCamelCase = "max" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) __lowerCamelCase = "histogram" elif args.calibrator == "mse": __lowerCamelCase = "histogram" else: raise ValueError(F"""Invalid calibrator {args.calibrator}""" ) __lowerCamelCase = QuantDescriptor(num_bits=args.aprec , calib_method=_lowercase ) __lowerCamelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_lowercase ) quant_nn.QuantLinear.set_default_quant_desc_weight(_lowercase ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Tuple=False ) -> Optional[int]: """simple docstring""" logger.info('Configuring Model for Quantization' ) logger.info(F"""using quantization package {pytorch_quantization.__file__}""" ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_lowercase , ['embeddings'] , which='weight' , _disabled=_lowercase ) if args.quant_disable: set_quantizer_by_name(_lowercase , [''] , _disabled=_lowercase ) if args.quant_disable_keyword: set_quantizer_by_name(_lowercase , args.quant_disable_keyword , _disabled=_lowercase ) if args.quant_disable_layer_module: set_quantizer_by_name(_lowercase , [R'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=_lowercase ) if args.quant_enable_layer_module: set_quantizer_by_name(_lowercase , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_lowercase ) if args.recalibrate_weights: recalibrate_weights(_lowercase ) if args.fuse_qkv: fuse_qkv(_lowercase , _lowercase ) if args.clip_gelu: clip_gelu(_lowercase , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_lowercase ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ) -> Optional[int]: """simple docstring""" logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F"""{name:80}: {module}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : int ) -> Dict: """simple docstring""" logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_lowercase ) def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]: """simple docstring""" def fusea(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_lowercase , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return __lowerCamelCase = qq._amax.detach().item() __lowerCamelCase = qk._amax.detach().item() __lowerCamelCase = qv._amax.detach().item() __lowerCamelCase = max(_lowercase , _lowercase , _lowercase ) qq._amax.fill_(_lowercase ) qk._amax.fill_(_lowercase ) qv._amax.fill_(_lowercase ) logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F"""FUSE_QKV: {name:{name_width}}""" ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): __lowerCamelCase = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_lowercase ) __lowerCamelCase = mod._input_quantizer._amax.data.detach().item() logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" ) def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Optional[int]: """simple docstring""" for name, mod in model.named_modules(): if hasattr(_lowercase , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: __lowerCamelCase = mod.weight.shape[0] __lowerCamelCase = mod._weight_quantizer._amax.detach() __lowerCamelCase = torch.ones(_lowercase , dtype=amax.dtype , device=amax.device ) * amax print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple: """simple docstring""" for name, mod in model.named_modules(): if hasattr(_lowercase , '_weight_quantizer' 
): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) __lowerCamelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) __lowerCamelCase = set(range(len(mod.weight.size() ) ) ) - axis_set __lowerCamelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_lowercase , keepdims=_lowercase ).detach() logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" ) __lowerCamelCase = amax def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=25 , UpperCamelCase__ : List[str]=180 , UpperCamelCase__ : List[str]=None ) -> Optional[int]: """simple docstring""" if ignore is None: __lowerCamelCase = [] elif not isinstance(_lowercase , _lowercase ): __lowerCamelCase = [ignore] __lowerCamelCase = 0 for name, mod in model.named_modules(): if not hasattr(_lowercase , 'weight' ): continue __lowerCamelCase = max(_lowercase , len(_lowercase ) ) for name, mod in model.named_modules(): __lowerCamelCase = getattr(_lowercase , '_input_quantizer' , _lowercase ) __lowerCamelCase = getattr(_lowercase , '_weight_quantizer' , _lowercase ) if not hasattr(_lowercase , 'weight' ): continue if type(_lowercase ) in ignore: continue if [True for s in ignore if type(_lowercase ) is str and s in name]: continue __lowerCamelCase = F"""Act:{input_q.extra_repr()}""" __lowerCamelCase = F"""Wgt:{weight_q.extra_repr()}""" __lowerCamelCase = F"""{name:{name_width}} {act_str} {wgt_str}""" if len(_lowercase ) <= line_width: logger.info(_lowercase ) else: logger.info(F"""{name:{name_width}} {act_str}""" ) logger.info(F"""{' ':{name_width}} {wgt_str}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> List[Any]: """simple docstring""" __lowerCamelCase = 0 for name, mod in model.named_modules(): if isinstance(_lowercase , pytorch_quantization.nn.TensorQuantizer ): print(F"""{name:80} {mod}""" ) count += 1 print(F"""{count} TensorQuantizers found in model""" ) def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Dict: """simple docstring""" __lowerCamelCase = getattr(_lowercase , _lowercase , _lowercase ) if quantizer_mod is not None: assert hasattr(_lowercase , _lowercase ) setattr(_lowercase , _lowercase , _lowercase ) else: logger.warning(F"""{name} has no {quantizer}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple="both" , **UpperCamelCase__ : Tuple ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = F"""Warning: changing {which} quantizers of {name:{qname_width}}""" for k, v in kwargs.items(): s += F""" {k}={v}""" if which in ["input", "both"]: set_quantizer(_lowercase , _lowercase , '_input_quantizer' , _lowercase , _lowercase ) if which in ["weight", "both"]: set_quantizer(_lowercase , _lowercase , '_weight_quantizer' , _lowercase , _lowercase ) logger.info(_lowercase ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple ) -> Tuple: """simple docstring""" for name, mod in model.named_modules(): if hasattr(_lowercase , '_input_quantizer' ) or hasattr(_lowercase , '_weight_quantizer' ): for n in names: if re.search(_lowercase , _lowercase ): set_quantizers(_lowercase , _lowercase , 
**kwargs ) elif name.endswith('_quantizer' ): for n in names: if re.search(n , name ): s = F"""Warning: changing {name:{name_width}}""" for k, v in kwargs.items(): s += F""" {k}={v}""" setattr(mod , k , v ) logger.info(s )
363
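The fusea helper in the snippet above forces the q, k and v input quantizers of one attention block to share a scale by overwriting their calibrated amax buffers with the maximum of the three. A stand-alone sketch of that idea with made-up values:

import torch

q_amax = torch.tensor(1.8)  # hypothetical calibrated amax values
k_amax = torch.tensor(2.4)
v_amax = torch.tensor(0.9)

fused = max(q_amax.item(), k_amax.item(), v_amax.item())
for buf in (q_amax, k_amax, v_amax):
    buf.fill_(fused)  # write the shared scale back in place, like qq._amax.fill_()

print(q_amax.item(), k_amax.item(), v_amax.item())  # 2.4 2.4 2.4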
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __A = logging.get_logger("transformers.models.speecht5") __A = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __A = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __A = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __A = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __A = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __A = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __A = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __A = { 
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __A = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = [] __A = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" for attribute in key.split('.' ): __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ) if weight_type is not None: __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value elif weight_type == "running_mean": __lowerCamelCase = value elif weight_type == "running_var": __lowerCamelCase = value elif weight_type == "num_batches_tracked": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + ('.' 
+ weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any: """simple docstring""" for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = [] if task == "s2t": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2T __lowerCamelCase = IGNORE_KEYS_S2T elif task == "t2s": __lowerCamelCase = None __lowerCamelCase = MAPPING_T2S __lowerCamelCase = IGNORE_KEYS_T2S elif task == "s2s": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2S __lowerCamelCase = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase__ , UpperCamelCase__ ): logger.info(F"""{name} was ignored""" ) continue __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , ) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: __lowerCamelCase = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2] __lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ ) if "weight_g" in name: __lowerCamelCase = 'weight_g' elif "weight_v" in name: __lowerCamelCase = 'weight_v' elif "bias" in name: __lowerCamelCase = 'bias' elif "weight" in name: __lowerCamelCase = 'weight' elif "running_mean" in name: __lowerCamelCase = 'running_mean' elif "running_var" in name: __lowerCamelCase = 'running_var' elif "num_batches_tracked" in name: __lowerCamelCase = 'num_batches_tracked' else: __lowerCamelCase = None set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) continue if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple: """simple docstring""" __lowerCamelCase = full_name.split('conv_layers.' )[-1] __lowerCamelCase = name.split('.' 
) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCamelCase__ ) @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple: """simple docstring""" if config_path is not None: __lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ ) else: __lowerCamelCase = SpeechTaConfig() if task == "s2t": __lowerCamelCase = config.max_text_positions __lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ ) elif task == "t2s": __lowerCamelCase = 1876 __lowerCamelCase = 600 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ ) elif task == "s2s": __lowerCamelCase = 1876 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: __lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it __lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) __lowerCamelCase = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) __lowerCamelCase = SpeechTaFeatureExtractor() __lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) __lowerCamelCase = torch.load(UpperCamelCase__ ) recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if repo_id: print('Pushing to the hub...' 
) processor.push_to_hub(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __A = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
348
0
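The ignore lists and mappings above treat ".*" and ".*." in a key as wildcards over layer indices, which is the rule should_ignore implements. A self-contained sketch of that matching (example names are made up):

def matches(key: str, name: str) -> bool:
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name

print(matches("encoder.layers.*.fc1", "encoder.layers.11.fc1.weight"))   # True
print(matches("text_encoder_prenet.*", "text_encoder_prenet.0.weight"))  # True
print(matches("encoder.layers.*.fc1", "decoder.layers.0.fc1.weight"))    # False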
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) through C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
364
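A quick cross-check of the dynamic-programming table against the closed form C(n) = C(2n, n) / (n + 1), assuming catalan_numbers from the snippet above is in scope:

from math import comb

def catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)

assert catalan_numbers(10) == [catalan_closed_form(i) for i in range(11)]
print(catalan_closed_form(10))  # 16796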
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
348
0
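The recursive DFS above can hit Python's recursion limit on long paths; an equivalent iterative BFS 2-coloring, using the same adjacency-list convention (names chosen here for illustration):

from collections import deque

def check_bipartite_bfs(graph: dict) -> bool:
    color = {v: -1 for v in graph}
    for start in graph:
        if color[start] != -1:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if color[u] == -1:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True

print(check_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))  # True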
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model"} __A = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } __A = { "AI-Sweden/gpt-sw3-126m": 20_48, "AI-Sweden/gpt-sw3-350m": 20_48, "AI-Sweden/gpt-sw3-1.6b": 20_48, "AI-Sweden/gpt-sw3-6.7b": 20_48, "AI-Sweden/gpt-sw3-20b": 20_48, } class __lowerCAmelCase ( __lowerCAmelCase ): """simple docstring""" snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> None: '''simple docstring''' __lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs __lowerCamelCase = kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) __lowerCamelCase = 'None' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __lowerCamelCase = '<|endoftext|>' if eos_token is None else eos_token __lowerCamelCase = '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __lowerCamelCase = unk_token if pad_token is None else pad_token __lowerCamelCase = eos_token if bos_token is None else bos_token else: __lowerCamelCase = '<pad>' if pad_token is None else pad_token __lowerCamelCase = '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , ) __lowerCamelCase = do_lower_case __lowerCamelCase = remove_space __lowerCamelCase = keep_accents __lowerCamelCase = vocab_file __lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase_ ) # Used for whitespace normalization in input texts # fmt : off __lowerCamelCase = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __lowerCamelCase = re.compile( f"""[{''.join(map(lowerCAmelCase_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]""" ) def __getstate__( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.__dict__.copy() __lowerCamelCase = None return state def __setstate__( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' __lowerCamelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowerCamelCase = {} __lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def lowercase_ ( self ) -> int: '''simple docstring''' return len(self.sp_model ) def lowercase_ ( self , lowerCamelCase__ ) -> str: '''simple docstring''' __lowerCamelCase = self.non_printing_characters_re.sub('' , lowerCAmelCase_ ) # Normalize whitespaces __lowerCamelCase = ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization __lowerCamelCase = unicodedata.normalize('NFC' , lowerCAmelCase_ ) return text def lowercase_ ( self , lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]: '''simple docstring''' __lowerCamelCase = self.preprocess_text(lowerCAmelCase_ ) return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ ) def lowercase_ ( self , lowerCamelCase__ ) -> int: '''simple docstring''' return self.sp_model.PieceToId(lowerCAmelCase_ ) def lowercase_ ( self , lowerCamelCase__ ) -> str: '''simple docstring''' return self.sp_model.IdToPiece(lowerCAmelCase_ ) @staticmethod def lowercase_ ( lowerCamelCase__ ) -> str: '''simple docstring''' return out_string def lowercase_ ( self , lowerCamelCase__ ) -> str: '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = '' __lowerCamelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase_ ) + token __lowerCamelCase = True __lowerCamelCase = [] else: current_sub_tokens.append(lowerCAmelCase_ ) __lowerCamelCase = False out_string += self.sp_model.decode(lowerCAmelCase_ ) return out_string def lowercase_ ( self ) -> Dict[str, int]: '''simple docstring''' __lowerCamelCase = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCamelCase = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase_ , 'wb' ) as fi: __lowerCamelCase = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase_ ) return (out_vocab_file,) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = False ) -> Union[List[int], List[List[int]], 
"torch.Tensor"]: '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __lowerCamelCase = self.preprocess_text(lowerCAmelCase_ ) __lowerCamelCase = self.sp_model.encode(lowerCAmelCase_ ) else: __lowerCamelCase = [self.preprocess_text(lowerCAmelCase_ ) for t in text] __lowerCamelCase = self.sp_model.encode(lowerCAmelCase_ ) if return_tensors is True or return_tensors == "pt": __lowerCamelCase = torch.tensor(lowerCAmelCase_ ) return token_ids def lowercase_ ( self , lowerCamelCase__ ) -> str: '''simple docstring''' return self.sp_model.decode(lowerCAmelCase_ ) def lowercase_ ( self , lowerCamelCase__ ) -> List[int]: '''simple docstring''' __lowerCamelCase = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __lowerCamelCase = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(lowerCAmelCase_ ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=lowerCAmelCase_ )
365
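The tokenizer's preprocess_text above combines three steps: strip non-printing characters, map exotic whitespace to plain spaces, and NFC-normalize. A stand-alone sketch (the whitespace set below is an assumed subset of the tokenizer's full set):

import re
import unicodedata

codepoints = list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8_203]
non_printing_re = re.compile("[" + "".join(map(chr, codepoints)) + "]")
whitespaces = {"\u2009", "\u200a", "\u202f"}  # assumed subset

def preprocess(text: str) -> str:
    text = non_printing_re.sub("", text)
    text = "".join(ch if ch not in whitespaces else " " for ch in text)
    return unicodedata.normalize("NFC", text)

print(preprocess("Hej\u2009d\u0061\u0308r\u200b"))  # 'Hej där'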
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
348
0
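The score checked in the test above is the total sequence log-likelihood: CrossEntropyLoss returns the mean per-token negative log-likelihood, so multiplying by the label length and negating recovers the summed log-probability. Toy numbers only:

import torch

per_token_nll = torch.tensor([2.1, 0.7, 1.4, 3.0])  # made-up per-token losses
mean_loss = per_token_nll.mean()
score = -(per_token_nll.numel() * mean_loss.item())
assert abs(score - (-per_token_nll.sum().item())) < 1e-5
print(score)  # -7.2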
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Calculate the equated monthly installment (EMI) on a loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
366
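A sanity check for the EMI formula above (assuming equated_monthly_installments from the snippet is in scope): paying the computed installment every month should drive the balance to numerically zero.

def remaining_balance(principal: float, rate_per_annum: float, years: int) -> float:
    emi = equated_monthly_installments(principal, rate_per_annum, years)
    balance = principal
    monthly_rate = rate_per_annum / 12
    for _ in range(years * 12):
        balance = balance * (1 + monthly_rate) - emi  # accrue interest, then pay
    return balance

print(round(remaining_balance(25_000, 0.12, 3), 6))  # ~0.0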
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Any: """simple docstring""" __lowerCamelCase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] __lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False __lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False __lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowerCamelCase = [3, 3, 3, 3] __lowerCamelCase = [5, 5, 5, 5] elif "fl4" in model_name: __lowerCamelCase = [4, 4, 4, 4] __lowerCamelCase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowerCamelCase = [3, 3, 3, 3] if "lrf" in model_name: __lowerCamelCase = [3, 3, 3, 3] else: __lowerCamelCase = [2, 2, 2, 2] if "tiny" in model_name: __lowerCamelCase = 96 elif "small" in model_name: __lowerCamelCase = 96 elif "base" in model_name: __lowerCamelCase = 128 elif "large" in model_name: __lowerCamelCase = 192 elif "xlarge" in model_name: __lowerCamelCase = 256 elif "huge" in model_name: __lowerCamelCase = 352 # set label information __lowerCamelCase = 'huggingface/label-files' if "large" in model_name or "huge" in model_name: __lowerCamelCase = 'imagenet-22k-id2label.json' else: __lowerCamelCase = 'imagenet-1k-id2label.json' __lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) __lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = FocalNetConfig( embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , ) return config def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> str: """simple docstring""" if "patch_embed.proj" in name: __lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: __lowerCamelCase = 'encoder.' 
+ name if "encoder.layers" in name: __lowerCamelCase = name.replace('encoder.layers' , 'encoder.stages' ) if "downsample.proj" in name: __lowerCamelCase = name.replace('downsample.proj' , 'downsample.projection' ) if "blocks" in name: __lowerCamelCase = name.replace('blocks' , 'layers' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowerCamelCase = name.replace('modulation.f' , 'modulation.projection_in' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowerCamelCase = name.replace('modulation.h' , 'modulation.projection_context' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowerCamelCase = name.replace('modulation.proj' , 'modulation.projection_out' ) if name == "norm.weight": __lowerCamelCase = 'layernorm.weight' if name == "norm.bias": __lowerCamelCase = 'layernorm.bias' if "head" in name: __lowerCamelCase = name.replace('head' , 'classifier' ) else: __lowerCamelCase = 'focalnet.' + name return name def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Dict: """simple docstring""" __lowerCamelCase = { 'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth', 'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth', 'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth', 'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth', 'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth', 'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth', 'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth', 'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth', 'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth', 'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth', } # fmt: on __lowerCamelCase = model_name_to_url[model_name] print('Checkpoint URL: ' , UpperCamelCase__ ) __lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['model'] # rename keys for key in state_dict.copy().keys(): __lowerCamelCase = state_dict.pop(UpperCamelCase__ ) __lowerCamelCase = val __lowerCamelCase = get_focalnet_config(UpperCamelCase__ ) __lowerCamelCase = FocalNetForImageClassification(UpperCamelCase__ ) model.eval() # load state dict model.load_state_dict(UpperCamelCase__ ) # verify conversion __lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCamelCase = BitImageProcessor( do_resize=UpperCamelCase__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=224 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , ) __lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) __lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' ) __lowerCamelCase = transforms.Compose( [ transforms.Resize(256 
), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ), ] ) __lowerCamelCase = image_transforms(UpperCamelCase__ ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1E-4 ) __lowerCamelCase = model(**UpperCamelCase__ ) __lowerCamelCase = outputs.logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) print('First values of logits:' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] ) elif model_name == "focalnet-tiny-lrf": __lowerCamelCase = torch.tensor([1.16_69, 0.01_25, -0.16_95] ) elif model_name == "focalnet-small": __lowerCamelCase = torch.tensor([0.49_17, -0.04_30, 0.13_41] ) elif model_name == "focalnet-small-lrf": __lowerCamelCase = torch.tensor([-0.25_88, -0.53_42, -0.23_31] ) elif model_name == "focalnet-base": __lowerCamelCase = torch.tensor([-0.16_55, -0.40_90, -0.17_30] ) elif model_name == "focalnet-base-lrf": __lowerCamelCase = torch.tensor([0.53_06, -0.04_83, -0.39_28] ) assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print(F"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(F"""{model_name}""" ) processor.push_to_hub(F"""{model_name}""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="focalnet-tiny", type=str, help="Name of the FocalNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub.", ) __A = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
348
0
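A spot-check of the checkpoint-key renaming defined in the conversion script above (the helper's mangled name is shown here as rename_key, an assumed alias; the expected outputs follow the substitutions listed in the snippet and are not exhaustive):

for old in ["patch_embed.proj.weight", "layers.0.blocks.1.modulation.f.bias", "head.weight"]:
    print(old, "->", rename_key(old))
# patch_embed.proj.weight -> focalnet.embeddings.patch_embeddings.projection.weight
# layers.0.blocks.1.modulation.f.bias -> focalnet.encoder.stages.0.layers.1.modulation.projection_in.bias
# head.weight -> classifier.weight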
"""simple docstring""" from collections.abc import Generator def lowerCamelCase_ ( ) -> Generator[int, None, None]: """simple docstring""" __lowerCamelCase = 0, 1 while True: __lowerCamelCase = b, a + b yield b def lowerCamelCase_ ( UpperCamelCase__ : List[str] = 1000 ) -> int: """simple docstring""" __lowerCamelCase = 1 __lowerCamelCase = fibonacci_generator() while len(str(next(SCREAMING_SNAKE_CASE__ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
367
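Usage of the generator and solver above; note the generator yields 1, 2, 3, 5, ... (it skips the leading 0 and one of the two 1s), which is why solution() compensates with answer + 1:

from itertools import islice

print(list(islice(fibonacci_generator(), 8)))  # [1, 2, 3, 5, 8, 13, 21, 34]
print(solution(3))  # 12: F(12) = 144 is the first Fibonacci number with 3 digits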
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
348
0
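Cross-checking median_of_two_arrays above against the standard library on odd, even and empty-plus-singleton inputs:

import statistics

assert median_of_two_arrays([1, 3], [2]) == statistics.median([1, 2, 3]) == 2
assert median_of_two_arrays([1, 2], [3, 4]) == statistics.median([1, 2, 3, 4]) == 2.5
assert median_of_two_arrays([], [5.0]) == 5.0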
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS} def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ) -> List[str]: """simple docstring""" if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: __lowerCamelCase = TOKENIZER_CLASSES else: __lowerCamelCase = {tokenizer_name: getattr(lowerCamelCase_ , tokenizer_name + 'Fast' )} logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: __lowerCamelCase = TOKENIZER_CLASSES[tokenizer_name] __lowerCamelCase = True if checkpoint_name is None: __lowerCamelCase = list(tokenizer_class.max_model_input_sizes.keys() ) else: __lowerCamelCase = [checkpoint_name] logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer __lowerCamelCase = tokenizer_class.from_pretrained(lowerCamelCase_ , force_download=lowerCamelCase_ ) # Save fast tokenizer logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: __lowerCamelCase = checkpoint.split('/' ) __lowerCamelCase = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) elif add_prefix: __lowerCamelCase = checkpoint __lowerCamelCase = dump_path else: __lowerCamelCase = None __lowerCamelCase = dump_path logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: __lowerCamelCase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] __lowerCamelCase = file_path.split(lowerCamelCase_ )[-1][0] if next_char == "/": __lowerCamelCase = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) __lowerCamelCase = None logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) __lowerCamelCase = tokenizer.save_pretrained( lowerCamelCase_ , legacy_format=lowerCamelCase_ , filename_prefix=lowerCamelCase_ ) logger.info(F"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith('tokenizer.json' ): os.remove(lowerCamelCase_ ) logger.info(F"""=> removing {file_name}""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files." ) parser.add_argument( "--tokenizer_name", default=None, type=str, help=( f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ''' "download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--checkpoint_name", default=None, type=str, help="Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.", ) parser.add_argument( "--force_download", action="store_true", help="Re-download checkpoints.", ) __A = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
368
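A hypothetical programmatic invocation of the converter above, mirroring what its argparse entry point does (the path and tokenizer name are illustrative; the function name is taken from the call in the snippet's __main__ block):

convert_slow_checkpoint_to_fast(
    tokenizer_name="BertTokenizer",  # must be a key of TOKENIZER_CLASSES
    checkpoint_name=None,            # None -> convert all canonical checkpoints
    dump_path="./fast_tokenizers",
    force_download=False,
)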
__A = { "joule": 1.0, "kilojoule": 10_00, "megajoule": 1_00_00_00, "gigajoule": 10_00_00_00_00, "wattsecond": 1.0, "watthour": 36_00, "kilowatthour": 3_60_00_00, "newtonmeter": 1.0, "calorie_nutr": 41_86.8, "kilocalorie_nutr": 4_18_68_00.00, "electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9, "britishthermalunit_it": 10_55.0_55_85, "footpound": 1.3_5_5_8_1_8, } def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : float ) -> float: """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: __lowerCamelCase = ( F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" F"""Valid values are: {', '.join(UpperCamelCase__ )}""" ) raise ValueError(UpperCamelCase__ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
348
0
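A few spot conversions through the joule pivot; the expected values follow directly from the table above (names as restored in the snippet):

print(energy_conversion("joule", "kilojoule", 1_000))        # 1.0
print(energy_conversion("kilowatthour", "joule", 1))         # 3600000.0
print(energy_conversion("watthour", "kilowatthour", 1_000))  # 1.0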
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=100 , lowerCamelCase__=13 , lowerCamelCase__=30 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=10 , lowerCamelCase__=0.02 , lowerCamelCase__=3 , lowerCamelCase__=None , lowerCamelCase__=[0, 1, 2, 3] , ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = 100 __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = out_indices __lowerCamelCase = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = num_patches + 1 def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels, pixel_labels def lowercase_ ( self ) -> Any: '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def lowercase_ ( self , lowerCamelCase__ , 
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = BeitModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __lowerCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = BeitForMaskedImageModeling(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __lowerCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict: '''simple docstring''' __lowerCamelCase = self.type_sequence_label_size __lowerCamelCase = BeitForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __lowerCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCamelCase = 1 __lowerCamelCase = BeitForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = BeitForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __lowerCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) __lowerCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( __a , __a , unittest.TestCase ): """simple docstring""" snake_case_ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) snake_case_ = ( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = BeitModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def lowercase_ ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='BEiT does not use inputs_embeds' ) def lowercase_ ( self ) -> str: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='BEiT has some layers using 
`add_module` which doesn\'t work well with `nn.DataParallel`' ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' pass def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(UpperCamelCase__ ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ ) def lowercase_ ( self ) -> str: '''simple docstring''' if not self.model_tester.is_training: return __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(UpperCamelCase__ ), BeitForMaskedImageModeling]: continue __lowerCamelCase = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() __lowerCamelCase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __lowerCamelCase = model(**UpperCamelCase__ ).loss loss.backward() def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __lowerCamelCase = False __lowerCamelCase = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(UpperCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue __lowerCamelCase = model_class(UpperCamelCase__ ) model.gradient_checkpointing_enable() model.to(UpperCamelCase__ ) model.train() __lowerCamelCase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __lowerCamelCase = model(**UpperCamelCase__ ).loss loss.backward() def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = 
_config_zero_init(UpperCamelCase__ ) for model_class in self.all_model_classes: __lowerCamelCase = model_class(config=UpperCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @slow def lowercase_ ( self ) -> Tuple: '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = BeitModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" __lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self ) -> int: '''simple docstring''' return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(UpperCamelCase__ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).pixel_values.to(UpperCamelCase__ ) # prepare bool_masked_pos __lowerCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): __lowerCamelCase = model(pixel_values=UpperCamelCase__ , bool_masked_pos=UpperCamelCase__ ) __lowerCamelCase = outputs.logits # verify the logits __lowerCamelCase = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , UpperCamelCase__ ) __lowerCamelCase = torch.tensor( [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase__ , atol=1e-2 ) ) @slow def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(UpperCamelCase__ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**UpperCamelCase__ ) __lowerCamelCase = outputs.logits # verify the logits __lowerCamelCase = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , UpperCamelCase__ ) __lowerCamelCase = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) __lowerCamelCase = 281 self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to( UpperCamelCase__ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**UpperCamelCase__ ) 
__lowerCamelCase = outputs.logits # verify the logits __lowerCamelCase = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , UpperCamelCase__ ) __lowerCamelCase = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) __lowerCamelCase = 2_396 self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ ) @slow def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) __lowerCamelCase = model.to(UpperCamelCase__ ) __lowerCamelCase = BeitImageProcessor(do_resize=UpperCamelCase__ , size=640 , do_center_crop=UpperCamelCase__ ) __lowerCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) __lowerCamelCase = Image.open(ds[0]['file'] ) __lowerCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**UpperCamelCase__ ) __lowerCamelCase = outputs.logits # verify the logits __lowerCamelCase = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , UpperCamelCase__ ) __lowerCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' ) if is_pillow_less_than_a: __lowerCamelCase = torch.tensor( [ [[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]], [[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]], [[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]], ] , device=UpperCamelCase__ , ) else: __lowerCamelCase = torch.tensor( [ [[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]], [[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]], [[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]], ] , device=UpperCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) __lowerCamelCase = model.to(UpperCamelCase__ ) __lowerCamelCase = BeitImageProcessor(do_resize=UpperCamelCase__ , size=640 , do_center_crop=UpperCamelCase__ ) __lowerCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) __lowerCamelCase = Image.open(ds[0]['file'] ) __lowerCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**UpperCamelCase__ ) __lowerCamelCase = outputs.logits.detach().cpu() __lowerCamelCase = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] ) __lowerCamelCase = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) __lowerCamelCase = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) __lowerCamelCase = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
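# Hedged usage sketch of the semantic-segmentation flow the integration tests
# above exercise; the checkpoint name mirrors the tests and network access is
# assumed available. This is a minimal illustration, not part of the test suite.
import torch
from PIL import Image
from transformers import BeitForSemanticSegmentation, BeitImageProcessor


def segment_image(image: Image.Image) -> torch.Tensor:
    checkpoint = 'microsoft/beit-base-finetuned-ade-640-640'
    processor = BeitImageProcessor.from_pretrained(checkpoint)
    model = BeitForSemanticSegmentation.from_pretrained(checkpoint).eval()
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    # Upsample the low-resolution logits back to the input size and take a
    # per-pixel argmax over the 150 ADE20K classes.
    return processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]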
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ['text']
    outputs = ['text']

    def encode(self, text):
        # Tokenize the input, truncating to the model's maximum length.
        return self.pre_processor(text, return_tensors='pt', truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
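# Hedged usage sketch, assuming the class as repaired above: PipelineTool
# subclasses are callable, chaining encode -> forward -> decode, and the
# checkpoint is downloaded lazily on first use.
if __name__ == '__main__':
    summarizer = TextSummarizationTool()
    print(summarizer('Alice: Lunch at noon? Bob: Sure, the usual place works for me.'))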
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name of a given date using Conway's Doomsday algorithm."""
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is not a leap year if it is not divisible by 4, or if it is a
    # century year not divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
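# Worked example for the algorithm above: for 2000-01-01, century = 20, so
# century_anchor = (5 * (20 % 4) + 2) % 7 = 2 and, with centurian = 0,
# dooms_day = 2; 2000 is divisible by 400 and therefore a leap year, so
# January's anchor is DOOMSDAY_LEAP[0] = 4, giving (2 + 1 - 4) % 7 = 6,
# i.e. 'Saturday'.
assert get_week_day(2000, 1, 1) == 'Saturday'
assert get_week_day(2020, 10, 24) == 'Saturday'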
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_choices def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_attention_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 
config_and_inputs __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = True snake_case_ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) __lowerCamelCase = model(lowerCamelCase__ )[0] __lowerCamelCase = [1, 11, 50_265] self.assertEqual(list(output.shape ) , lowerCamelCase__ ) # compare the actual values for a slice. __lowerCamelCase = np.array( [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) __lowerCamelCase = model(lowerCamelCase__ )[0] # compare the actual values for a slice. __lowerCamelCase = np.array( [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
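# Hedged stand-alone sketch of the masked-LM integration check above; the
# checkpoint name and the PyTorch-weight conversion (`from_pt=True`) mirror
# the slow test, and network access is assumed.
import numpy as np
from transformers import FlaxRobertaPreLayerNormForMaskedLM

mlm_model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained(
    'andreasmadsen/efficient_mlm_m0.40', from_pt=True
)
example_ids = np.array(
    [[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=np.int32
)
mlm_logits = mlm_model(example_ids)[0]
print(mlm_logits.shape)  # expected (1, 11, 50265): batch, sequence length, vocab size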
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


# silence TensorFlow's C++ logging before any framework import
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: 
'''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> 
Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] )
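# Hedged sketch of the dummy-object mechanism above: when the `sentencepiece`
# backend is missing, the lazy import machinery exposes these placeholders in
# place of the real tokenizer classes, so failure happens at instantiation time
# with an actionable message rather than at import time. Illustrated with a
# hypothetical placeholder name (any of the classes above behaves the same):
#
#   tokenizer = SomeSentencePieceTokenizer()
#   # raises ImportError via `requires_backends`, pointing the user at
#   # `pip install sentencepiece`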
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = "▁" __A = {"vocab_file": "sentencepiece.bpe.model"} __A = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), } } __A = { "facebook/mbart-large-en-ro": 10_24, "facebook/mbart-large-cc25": 10_24, } # fmt: off __A = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class __lowerCAmelCase ( snake_case_ ): """simple docstring""" snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = ["input_ids", "attention_mask"] snake_case_ = [] snake_case_ = [] def __init__( self , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = None , lowerCamelCase__=None , **lowerCamelCase__ , ) -> Any: '''simple docstring''' __lowerCamelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token __lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , tokenizer_file=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) __lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_A ) ) __lowerCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __lowerCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowerCamelCase = 1 __lowerCamelCase = len(self.sp_model ) __lowerCamelCase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A ) } __lowerCamelCase = {v: k for k, v in self.lang_code_to_id.items()} __lowerCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __lowerCamelCase = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) __lowerCamelCase = src_lang if src_lang is not None else 'en_XX' __lowerCamelCase = self.lang_code_to_id[self._src_lang] __lowerCamelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = self.__dict__.copy() __lowerCamelCase = None __lowerCamelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowerCamelCase = {} __lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowercase_ ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def lowercase_ ( self , lowerCamelCase__ ) -> None: '''simple docstring''' __lowerCamelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) __lowerCamelCase = [1] * len(self.prefix_tokens ) __lowerCamelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_A )) + suffix_ones return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]: '''simple docstring''' __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) __lowerCamelCase = src_lang __lowerCamelCase = self(_A , add_special_tokens=_A , return_tensors=_A , **_A ) __lowerCamelCase = self.convert_tokens_to_ids(_A ) __lowerCamelCase = tgt_lang_id return inputs def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase_ ( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_A , out_type=_A ) def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowerCamelCase = self.sp_model.PieceToId(_A ) # Need to return unknown token if the SP model returned 0 
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = ''.join(_A ).replace(_A , ' ' ).strip() return out_string def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCamelCase = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , 'wb' ) as fi: __lowerCamelCase = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = "en_XX" , lowerCamelCase__ = None , lowerCamelCase__ = "ro_RO" , **lowerCamelCase__ , ) -> BatchEncoding: '''simple docstring''' __lowerCamelCase = src_lang __lowerCamelCase = tgt_lang return super().prepare_seqaseq_batch(_A , _A , **_A ) def lowercase_ ( self ) -> str: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def lowercase_ ( self ) -> Any: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase_ ( self , lowerCamelCase__ ) -> None: '''simple docstring''' __lowerCamelCase = self.lang_code_to_id[src_lang] __lowerCamelCase = [] __lowerCamelCase = [self.eos_token_id, self.cur_lang_code] def lowercase_ ( self , lowerCamelCase__ ) -> None: '''simple docstring''' __lowerCamelCase = self.lang_code_to_id[lang] __lowerCamelCase = [] __lowerCamelCase = [self.eos_token_id, self.cur_lang_code]
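# Hedged usage sketch of the language-code handling implemented above, assuming
# the public `MBartTokenizer` name for this class and network access to the
# `facebook/mbart-large-en-ro` checkpoint:
from transformers import MBartTokenizer

mbart_tokenizer = MBartTokenizer.from_pretrained(
    'facebook/mbart-large-en-ro', src_lang='en_XX', tgt_lang='ro_RO'
)
encoded = mbart_tokenizer('UN Chief Says There Is No Military Solution in Syria')
# set_src_lang_special_tokens above leaves the prefix empty and appends
# [</s>, en_XX], so the source language code lands at the end of the sequence.
print(mbart_tokenizer.convert_ids_to_tokens(encoded['input_ids'])[-2:])  # ['</s>', 'en_XX']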
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowercase_ ( self , lowerCamelCase__=0 ) -> int: '''simple docstring''' __lowerCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) ) __lowerCamelCase = np.random.RandomState(lowerCamelCase__ ) __lowerCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) # warmup pass to apply optimizations __lowerCamelCase = pipe(**self.get_dummy_inputs() ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) 
pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def lowercase_ ( self ) -> int: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = ort.SessionOptions() __lowerCamelCase = False return options def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __lowerCamelCase = init_image.resize((768, 512) ) # using the PNDM scheduler by default __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'A fantasy landscape, trending on artstation' __lowerCamelCase = np.random.RandomState(0 ) __lowerCamelCase = pipe( prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images __lowerCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __lowerCamelCase = init_image.resize((768, 512) ) __lowerCamelCase = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' ) __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'A fantasy landscape, trending on artstation' __lowerCamelCase = np.random.RandomState(0 ) __lowerCamelCase = pipe( prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images __lowerCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
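# Hedged stand-alone sketch of the img2img pipeline the nightly test above
# exercises; the public class is spelled `OnnxStableDiffusionImg2ImgPipeline`,
# and the ONNX export of the checkpoint is assumed available under the `onnx`
# revision, as in the test.
import numpy as np
from diffusers import OnnxStableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

init_image = load_image(
    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
    '/img2img/sketch-mountains-input.jpg'
).resize((768, 512))
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    'CompVis/stable-diffusion-v1-4', revision='onnx', provider='CPUExecutionProvider'
)
output = pipe(
    prompt='A fantasy landscape, trending on artstation',
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    num_inference_steps=10,
    generator=np.random.RandomState(0),
    output_type='np',
)
print(output.images.shape)  # (1, 512, 768, 3)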
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" __A = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" __A = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): """simple docstring""" def lowercase_ ( self ) -> Tuple: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[ 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=4 , lowerCamelCase__=False ) -> List[str]: '''simple docstring''' __lowerCamelCase = compute_bleu( reference_corpus=lowerCamelCase__ , translation_corpus=lowerCamelCase__ , max_order=lowerCamelCase__ , smooth=lowerCamelCase__ ) (__lowerCamelCase) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
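# For reference, the `brevity_penalty` this metric reports follows Papineni et
# al. (2002): BP = 1 if c > r else exp(1 - r / c), where c is the total
# translation length and r the effective reference length. A minimal
# stand-alone check of that formula:
import math


def brevity_penalty(translation_length: int, reference_length: int) -> float:
    # No penalty when the candidate is longer than the reference.
    if translation_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / translation_length)


assert brevity_penalty(12, 7) == 1.0
assert abs(brevity_penalty(7, 12) - math.exp(1 - 12 / 7)) < 1e-12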
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __A = logging.get_logger(__name__) __A = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85, 7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77, 13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11, 46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86, 1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91, 1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09, 3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61 ] __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73, 8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27, 32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47, 72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93, 1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75, 2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65, 4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62 ] class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''whisper''' snake_case_ = ['''past_key_values'''] snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str: '''simple docstring''' __lowerCamelCase = vocab_size __lowerCamelCase = num_mel_bins __lowerCamelCase = d_model __lowerCamelCase = encoder_layers __lowerCamelCase = encoder_attention_heads __lowerCamelCase = decoder_layers __lowerCamelCase = decoder_attention_heads __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = encoder_ffn_dim __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = encoder_layerdrop __lowerCamelCase = decoder_layerdrop __lowerCamelCase = 
use_cache __lowerCamelCase = encoder_layers __lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __lowerCamelCase = max_source_positions __lowerCamelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. __lowerCamelCase = classifier_proj_size __lowerCamelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks __lowerCamelCase = median_filter_width super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __lowerCamelCase = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: __lowerCamelCase = {0: 'batch'} else: __lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' ) return common_inputs def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]: '''simple docstring''' __lowerCamelCase = OrderedDict() __lowerCamelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , ) __lowerCamelCase = encoder_inputs['input_features'].shape[2] __lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length __lowerCamelCase = super().generate_dummy_inputs( preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = encoder_inputs.pop('input_features' ) __lowerCamelCase = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: __lowerCamelCase = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def lowercase_ ( self ) -> float: '''simple docstring''' return 1e-3
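# Hedged usage sketch, assuming the public `WhisperConfig` name for the config
# class defined above: the SpecAugment-style masking knobs are plain config
# attributes, with the defaults shown in the signature above.
from transformers import WhisperConfig

whisper_config = WhisperConfig()
print(whisper_config.apply_spec_augment)  # False by default
print(whisper_config.mask_time_prob, whisper_config.mask_time_length)  # 0.05, 10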
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"], "configuration_data2vec_text": [ "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecTextConfig", "Data2VecTextOnnxConfig", ], "configuration_data2vec_vision": [ "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecVisionConfig", "Data2VecVisionOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecAudioForAudioFrameClassification", "Data2VecAudioForCTC", "Data2VecAudioForSequenceClassification", "Data2VecAudioForXVector", "Data2VecAudioModel", "Data2VecAudioPreTrainedModel", ] __A = [ "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecTextForCausalLM", "Data2VecTextForMaskedLM", "Data2VecTextForMultipleChoice", "Data2VecTextForQuestionAnswering", "Data2VecTextForSequenceClassification", "Data2VecTextForTokenClassification", "Data2VecTextModel", "Data2VecTextPreTrainedModel", ] __A = [ "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecVisionForImageClassification", "Data2VecVisionForMaskedImageModeling", "Data2VecVisionForSemanticSegmentation", "Data2VecVisionModel", "Data2VecVisionPreTrainedModel", ] if is_tf_available(): __A = [ "TFData2VecVisionForImageClassification", "TFData2VecVisionForSemanticSegmentation", "TFData2VecVisionModel", "TFData2VecVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
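# Hedged sketch of the lazy-import behavior wired up above: importing the
# package stays cheap, and attribute access triggers the real submodule import.
# The module path below is an assumption about where this __init__ lives.
import transformers.models.data2vec as data2vec

config_cls = data2vec.Data2VecAudioConfig  # resolved lazily by _LazyModule
print(config_cls.__name__)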
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = rotary_dim __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = initializer_range __lowerCamelCase = None __lowerCamelCase = vocab_size - 1 __lowerCamelCase = vocab_size - 1 __lowerCamelCase = vocab_size - 1 def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = 20 __lowerCamelCase = model_class_name(lowerCamelCase__ ) __lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ ) __lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) __lowerCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCamelCase = model( input_ids[:, :-1] , attention_mask=lowerCamelCase__ , 
past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCamelCase = model( input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , ) __lowerCamelCase = model(lowerCamelCase__ ) __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = 20 __lowerCamelCase = model_class_name(lowerCamelCase__ ) __lowerCamelCase = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ ) __lowerCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCamelCase = model( input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCamelCase = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = FlaxGPTJModelTester(self ) def lowercase_ ( self ) -> str: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) @tooslow def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) __lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ ) __lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCamelCase = False __lowerCamelCase = model.config.eos_token_id __lowerCamelCase = jax.jit(model.generate ) __lowerCamelCase = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences __lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , 
skip_special_tokens=lowerCamelCase__ ) __lowerCamelCase = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @is_pt_flax_cross_test def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape __lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase__ ): __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval() __lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa ) __lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ ) __lowerCamelCase = fx_state with torch.no_grad(): __lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple() __lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase__ ) __lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ ) __lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple() self.assertEqual( len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval() __lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa ) __lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params ) __lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape __lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase__ ): __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 0 __lowerCamelCase = 1 # make sure weights are tied in 
PyTorch pt_model.tie_weights() with torch.no_grad(): __lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple() __lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase__ ) __lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ ) with torch.no_grad(): __lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple() self.assertEqual( len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowercase_ ( self ) -> List[str]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ )
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __A = """bart""" __A = True @st.cache(allow_output_mutation=UpperCamelCase__ ) def lowerCamelCase_ ( ) -> int: """simple docstring""" if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' ) __lowerCamelCase = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('yjernite/bart_eli5' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' ) __lowerCamelCase = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' ) sas_model.load_state_dict(save_dict['model'] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=UpperCamelCase__ ) def lowerCamelCase_ ( ) -> Any: """simple docstring""" if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train'] __lowerCamelCase = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(UpperCamelCase__ , 1 , UpperCamelCase__ ) wikiaab_gpu_index_flat.add(UpperCamelCase__ ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'host': 'localhost', 'port': '9200'}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=UpperCamelCase__ ) def lowerCamelCase_ ( ) -> Union[str, Any]: """simple docstring""" __lowerCamelCase = datasets.load_dataset('eli5' , name='LFQA_reddit' ) __lowerCamelCase = elia['train_eli5'] __lowerCamelCase = np.memmap( 'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(UpperCamelCase__ ) return (elia_train, eli5_train_q_index) __A = load_indexes() __A = load_models() __A = load_train_data() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=10 ) -> Dict: """simple docstring""" __lowerCamelCase = embed_questions_for_retrieval([question] , UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = [elia_train[int(UpperCamelCase__ )] for i in I[0]] return nn_examples def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]="wiki40b" , UpperCamelCase__ : str="dense" , UpperCamelCase__ : Dict=10 ) -> List[Any]: """simple docstring""" if source == "none": __lowerCamelCase , __lowerCamelCase = (' <P> '.join(['' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( UpperCamelCase__ , UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( UpperCamelCase__ , UpperCamelCase__ , index_name='english_wiki40b_snippets_100w' , n_results=UpperCamelCase__ , ) __lowerCamelCase = [ (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst ] __lowerCamelCase = 'question: {} context: {}'.format(UpperCamelCase__ , UpperCamelCase__ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda UpperCamelCase__ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase__ : None), } ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=64 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=0.95 , UpperCamelCase__ : Dict=0.8 ) -> Dict: """simple docstring""" with torch.no_grad(): __lowerCamelCase = qa_sas_generate( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , num_answers=1 , num_beams=UpperCamelCase__ , min_len=UpperCamelCase__ , max_len=UpperCamelCase__ , do_sample=UpperCamelCase__ , temp=UpperCamelCase__ , top_p=UpperCamelCase__ , top_k=UpperCamelCase__ , max_input_length=1024 , device='cuda:0' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar __A = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>""" __A = """ <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class=\"img-container\"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __A = """ This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. """ st.sidebar.markdown(description, unsafe_allow_html=True) __A = [ """Answer the question""", """View the retrieved document only""", """View the most similar ELI5 question and answer""", """Show me everything, please!""", ] __A = st.sidebar.checkbox("Demo options") if demo_options: __A = st.sidebar.selectbox( "", action_list, index=3, ) __A = action_list.index(action_st) __A = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) __A = show_type == """Show full text of passages""" else: __A = 3 __A = True __A = st.sidebar.checkbox("Retrieval options") if retrieval_options: __A = """ ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. 
""" st.sidebar.markdown(retriever_info) __A = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) __A = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: __A = """wiki40b""" __A = """dense""" __A = """beam""" __A = 2 __A = 64 __A = 2_56 __A = None __A = None __A = st.sidebar.checkbox("Generation options") if generate_options: __A = """ ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. """ st.sidebar.markdown(generate_info) __A = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) __A = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) __A = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": __A = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __A = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) __A = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) __A = None # start main text __A = [ """<MY QUESTION>""", """How do people make chocolate?""", """Why do we get a fever when we are sick?""", """How can different animals perceive different colors?""", """What is natural language processing?""", """What's the best way to treat a sunburn?""", """What exactly are vitamins ?""", """How does nuclear energy provide electricity?""", """What's the difference between viruses and bacteria?""", """Why are flutes classified as woodwinds when most of them are made out of metal ?""", """Why do people like drinking coffee even though it tastes so bad?""", """What happens when wine ages? How does it make the wine taste better?""", """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""", """How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""", """How does New Zealand have so many large bird predators?""", ] __A = st.selectbox( "What would you like to ask? 
---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": __A = st.text_input("Enter your question here:", "") else: __A = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": __A = make_support(question, source=wiki_source, method="dense", n_results=10) __A = make_support(question, source=wiki_source, method="sparse", n_results=10) __A = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __A = support_list[:10] __A = """<P> """ + """ <P> """.join([res[-1] for res in support_list]) else: __A = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __A = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): __A = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(" ", "_")) __A = res[1].strip() if sec_titles == "": __A = """[{}]({})""".format(res[0], wiki_url) else: __A = sec_titles.split(" & ") __A = """ & """.join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: __A = find_nearest_training(question) __A = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) __A = [ """{}. {}""".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) __A = """ --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu __A = False class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase_ ( self ) -> int: '''simple docstring''' return 12 @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' return 12 @property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return 32 @property def lowercase_ ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(lowerCamelCase__ ) @property def lowercase_ ( self ) -> str: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = 12 __lowerCamelCase = 12 __lowerCamelCase = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } __lowerCamelCase = TransformeraDModel(**lowerCamelCase__ ) return model def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.dummy_vqvae __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_transformer __lowerCamelCase = VQDiffusionScheduler(self.num_embed ) __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase__ ) __lowerCamelCase = VQDiffusionPipeline( vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'teddy bear playing in the pool' __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' ) __lowerCamelCase = output.images __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe( [prompt] , generator=lowerCamelCase__ , 
output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowerCamelCase = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.dummy_vqvae __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_transformer __lowerCamelCase = VQDiffusionScheduler(self.num_embed ) __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings( learnable=lowerCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __lowerCamelCase = VQDiffusionPipeline( vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'teddy bear playing in the pool' __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' ) __lowerCamelCase = output.images __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe( [prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowerCamelCase = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) __lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) __lowerCamelCase = pipeline.to(lowerCamelCase__ ) pipeline.set_progress_bar_config(disable=lowerCamelCase__ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipeline( 'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = is_training __lowerCamelCase = use_auxiliary_loss __lowerCamelCase = num_queries __lowerCamelCase = num_channels __lowerCamelCase = min_size __lowerCamelCase = max_size __lowerCamelCase = num_labels __lowerCamelCase = hidden_dim __lowerCamelCase = hidden_dim def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCamelCase__ ) __lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ ) __lowerCamelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5 ).float() __lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long() __lowerCamelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerConfig( hidden_size=self.hidden_dim , ) __lowerCamelCase = self.num_queries __lowerCamelCase = self.num_labels __lowerCamelCase = [1, 1, 1, 1] __lowerCamelCase = self.num_channels __lowerCamelCase = 64 __lowerCamelCase = 128 __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim return config def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = output.encoder_hidden_states __lowerCamelCase = output.pixel_decoder_hidden_states __lowerCamelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple: '''simple docstring''' with torch.no_grad(): __lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) 
model.eval() __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() def comm_check_on_output(lowerCamelCase__ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) __lowerCamelCase = model( pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = MaskaFormerModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def lowercase_ ( self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def lowercase_ ( self ) 
-> Tuple: '''simple docstring''' pass @unittest.skip(reason='Mask2Former is not a generative model' ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def lowercase_ ( self ) -> Dict: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' pass def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: __lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = (self.model_tester.min_size,) * 2 __lowerCamelCase = { 'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ), 'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ), 'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(), } __lowerCamelCase = self.model_tester.get_config() __lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' if not self.model_tester.is_training: return __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss loss.backward() def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = 
model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) __lowerCamelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __lowerCamelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCamelCase__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" __lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def lowercase_ ( self ) -> Dict: '''simple docstring''' return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) __lowerCamelCase = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) # masks_queries_logits 
__lowerCamelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) __lowerCamelCase = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] __lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) # class_queries_logits __lowerCamelCase = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) __lowerCamelCase = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) __lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ ) __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']] __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']] with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None )
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain `n` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
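A quick hedged sanity check for the solution above: 144 = F(12) is the first Fibonacci term with three digits, so `solution(3)` should return 12.

# Illustrative check, assuming the function is importable as `solution`:
assert solution(3) == 12  # 144 is the 12th term and the first with 3 digits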
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1_024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2_048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12_544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class __lowerCAmelCase ( lowerCamelCase__ ): """simple docstring""" snake_case_ = '''vit_msn''' def __init__( self , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__="gelu" , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-06 , lowerCamelCase__=224 , lowerCamelCase__=16 , lowerCamelCase__=3 , lowerCamelCase__=True , **lowerCamelCase__ , ) -> Any: '''simple docstring''' super().__init__(**__lowerCamelCase ) __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = qkv_bias
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = 42 class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple: '''simple docstring''' super().__init__() __lowerCamelCase = num_attention_heads __lowerCamelCase = attention_head_dim __lowerCamelCase = num_attention_heads * attention_head_dim __lowerCamelCase = additional_embeddings __lowerCamelCase = time_embed_dim or inner_dim __lowerCamelCase = embedding_proj_dim or embedding_dim __lowerCamelCase = clip_embed_dim or embedding_dim __lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 ) __lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) if embedding_proj_norm_type is None: __lowerCamelCase = None elif embedding_proj_norm_type == "layer": __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) else: raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) if encoder_hid_proj_type is None: __lowerCamelCase = None elif encoder_hid_proj_type == "linear": __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) else: raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) ) if added_emb_type == "prd": __lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) ) elif added_emb_type is None: __lowerCamelCase = None else: raise ValueError( f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" ) __lowerCamelCase = nn.ModuleList( [ BasicTransformerBlock( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , ) for d in range(lowerCamelCase__ ) ] ) if norm_in_type == "layer": __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) elif norm_in_type is None: __lowerCamelCase = None else: raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" ) __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 ) causal_attention_mask.triu_(1 ) __lowerCamelCase = causal_attention_mask[None, ...] 
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowercase_ ( self ) -> Dict[str, AttentionProcessor]: '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , 'set_processor' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return processors def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count: raise ValueError( f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the""" f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , 'set_processor' ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): module.set_processor(lowerCamelCase__ ) else: module.set_processor(processor.pop(f"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int: '''simple docstring''' __lowerCamelCase = hidden_states.shape[0] __lowerCamelCase = timestep if not torch.is_tensor(lowerCamelCase__ ): __lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0: __lowerCamelCase = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device ) __lowerCamelCase = self.time_proj(lowerCamelCase__ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype ) __lowerCamelCase = self.time_embedding(lowerCamelCase__ ) if self.embedding_proj_norm is not None: __lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ ) __lowerCamelCase = self.embedding_proj(lowerCamelCase__ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' ) __lowerCamelCase = self.proj_in(lowerCamelCase__ ) __lowerCamelCase = self.positional_embedding.to(hidden_states.dtype ) __lowerCamelCase = [] __lowerCamelCase = 0 if encoder_hidden_states is not None: additional_embeds.append(lowerCamelCase__ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __lowerCamelCase = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __lowerCamelCase = hidden_states[:, None, :] __lowerCamelCase = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 ) additional_embeds.append(lowerCamelCase__ ) __lowerCamelCase = torch.cat( lowerCamelCase__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __lowerCamelCase = F.pad( lowerCamelCase__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __lowerCamelCase = hidden_states + positional_embeddings if attention_mask is not None: __lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0 __lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 ) __lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __lowerCamelCase = self.norm_in(lowerCamelCase__ ) for block in self.transformer_blocks: __lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) __lowerCamelCase = self.norm_out(lowerCamelCase__ ) if self.prd_embedding is not None: __lowerCamelCase = hidden_states[:, -1] else: __lowerCamelCase = hidden_states[:, additional_embeddings_len:] __lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]: '''simple docstring''' __lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
style_context_codestyle: 348
label: 0
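A minimal usage sketch for the ViT-MSN configuration above; it assumes the de-obfuscated class corresponds to `transformers.ViTMSNConfig`:

# Hedged sketch: assumes the configuration class above is transformers.ViTMSNConfig.
from transformers import ViTMSNConfig

config = ViTMSNConfig()  # defaults match the __init__ signature above
print(config.model_type, config.hidden_size, config.patch_size)  # vit_msn 768 16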
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=18 , lowerCamelCase__=30 , lowerCamelCase__=400 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=None , ) -> Dict: '''simple docstring''' __lowerCamelCase = size if size is not None else {'shortest_edge': 20} __lowerCamelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18} __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size def lowercase_ ( self ) -> Any: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = MobileNetVaImageProcessor if is_vision_available() else None def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = MobileNetVaImageProcessingTester(self ) @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , 'do_resize' ) ) self.assertTrue(hasattr(_A , 'size' ) ) self.assertTrue(hasattr(_A , 'do_center_crop' ) ) self.assertTrue(hasattr(_A , 'crop_size' ) ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def lowercase_ ( self ) -> int: '''simple docstring''' pass def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __lowerCamelCase = image_processing(_A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowercase_ ( self ) -> Dict: '''simple docstring''' # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __lowerCamelCase = image_processing(_A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowercase_ ( self ) -> List[Any]: '''simple docstring''' # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __lowerCamelCase = image_processing(_A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
code_codestyle: 357
import sys from collections import defaultdict class __lowerCAmelCase : """simple docstring""" def __init__( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = [] def lowercase_ ( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' return self.node_position[vertex] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = pos def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCamelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCamelCase = 2 * start + 1 else: __lowerCamelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child] __lowerCamelCase , __lowerCamelCase = ( heap[start], positions[start], ) __lowerCamelCase , __lowerCamelCase = temp, tempa __lowerCamelCase = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , lowerCamelCase__ ) self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = position[index] while index != 0: __lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCamelCase = heap[parent] __lowerCamelCase = position[parent] self.set_position(position[parent] , lowerCamelCase__ ) else: __lowerCamelCase = val __lowerCamelCase = temp self.set_position(lowerCamelCase__ , lowerCamelCase__ ) break __lowerCamelCase = parent else: __lowerCamelCase = val __lowerCamelCase = temp self.set_position(lowerCamelCase__ , 0 ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str: '''simple docstring''' __lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1 for i in range(lowerCamelCase__ , -1 , -1 ): self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = positions[0] __lowerCamelCase = sys.maxsize self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ ) return temp def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowerCamelCase = Heap() __lowerCamelCase = [0] * len(UpperCamelCase__ ) __lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex __lowerCamelCase = [] for vertex in range(len(UpperCamelCase__ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCamelCase__ ) heap.node_position.append(UpperCamelCase__ ) __lowerCamelCase = [] __lowerCamelCase = 1 __lowerCamelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCamelCase = 0 __lowerCamelCase = distance heap.heapify(UpperCamelCase__ , UpperCamelCase__ ) for _ in range(1 , len(UpperCamelCase__ ) ): __lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ ) if visited[vertex] == 0: 
tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCamelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCamelCase__ )] ): __lowerCamelCase = distance heap.bottom_to_top( UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > __A = int(input("Enter number of edges: ").strip()) __A = defaultdict(list) for _ in range(edges_number): __A = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
style_context_codestyle: 348
label: 0
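A hedged driver for the Prim's-algorithm helper above; it assumes the top-level function carries the `prisms_algorithm` name used in its `__main__` block:

# Hedged sketch: a 3-vertex triangle graph; the MST drops the heaviest edge (0-2, weight 3).
from collections import defaultdict

adjacency_list = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    adjacency_list[u].append([v, w])
    adjacency_list[v].append([u, w])

print(prisms_algorithm(adjacency_list))  # expected: [(0, 1), (1, 2)]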
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A = logging.get_logger(__name__) __A = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __lowerCAmelCase ( lowerCamelCase__ ): """simple docstring""" snake_case_ = '''gpt_neo''' snake_case_ = ['''past_key_values'''] snake_case_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , lowerCamelCase__=50_257 , lowerCamelCase__=2_048 , lowerCamelCase__=2_048 , lowerCamelCase__=24 , lowerCamelCase__=[[["global", "local"], 12]] , lowerCamelCase__=16 , lowerCamelCase__=None , lowerCamelCase__=256 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=1e-5 , lowerCamelCase__=0.02 , lowerCamelCase__=True , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , **lowerCamelCase__ , ) -> Tuple: '''simple docstring''' __lowerCamelCase = vocab_size __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = num_layers __lowerCamelCase = num_heads __lowerCamelCase = intermediate_size __lowerCamelCase = window_size __lowerCamelCase = activation_function __lowerCamelCase = resid_dropout __lowerCamelCase = embed_dropout __lowerCamelCase = attention_dropout __lowerCamelCase = classifier_dropout __lowerCamelCase = layer_norm_epsilon __lowerCamelCase = initializer_range __lowerCamelCase = use_cache __lowerCamelCase = bos_token_id __lowerCamelCase = eos_token_id __lowerCamelCase = attention_types __lowerCamelCase = self.expand_attention_types_params(__snake_case ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """ f"""`config.num_layers = {self.num_layers}`. """ '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case ) @staticmethod def lowercase_ ( lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ) -> Optional[int]: """simple docstring""" import torch __lowerCamelCase = input.size() __lowerCamelCase = len(_A ) __lowerCamelCase = shape[dimension] __lowerCamelCase = torch.arange(0 , _A , _A ) __lowerCamelCase = torch.div(sizedim - size , _A , rounding_mode='floor' ) + 1 __lowerCamelCase = torch.arange(_A ) + low_indices[:min_length][:, None] __lowerCamelCase = [slice(_A )] * rank __lowerCamelCase = indices __lowerCamelCase = input[s] __lowerCamelCase = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(_A ) def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : str ) -> Union[str, Any]: """simple docstring""" import torch __lowerCamelCase = torch.arange(1 , _A ) __lowerCamelCase = torch.remainder(_A , _A ) __lowerCamelCase = remainders == 0 __lowerCamelCase = candidates[divisor_indices] __lowerCamelCase = torch.max(_A ) return largest_divisor, torch.div(_A , _A , rounding_mode='floor' ) class __lowerCAmelCase ( lowerCamelCase__ ): """simple docstring""" @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(__snake_case , direction='inputs' ) __lowerCamelCase = {0: 'batch', 1: 'past_sequence + sequence'} else: __lowerCamelCase = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowercase_ ( self ) -> int: '''simple docstring''' return self._config.num_heads def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = super(__snake_case , self ).generate_dummy_inputs( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) # We need to order the input in the way they appears in the forward() __lowerCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowerCamelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values __lowerCamelCase = seqlen + 2 __lowerCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowerCamelCase = [ (torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(self.num_layers ) ] __lowerCamelCase = common_inputs['attention_mask'] if self.use_past: __lowerCamelCase = ordered_inputs['attention_mask'].dtype __lowerCamelCase = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 ) return ordered_inputs @property def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' return 13
code_codestyle: 358
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` stays a dataclass field (not a ClassVar) so it appears in asdict() output.
    task: str = field(default='question-answering-extractive', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'question': Value('string'), 'context': Value('string')})
    label_schema: ClassVar[Features] = Features(
        {
            'answers': Sequence(
                {
                    'text': Value('string'),
                    'answer_start': Value('int32'),
                }
            )
        }
    )
    question_column: str = 'question'
    context_column: str = 'context'
    answers_column: str = 'answers'

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: 'question', self.context_column: 'context', self.answers_column: 'answers'}
style_context_codestyle: 348
label: 0
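A usage sketch for the GPT-Neo configuration above; it assumes the de-obfuscated class is `transformers.GPTNeoConfig`:

# Hedged sketch: assumes the configuration class above is transformers.GPTNeoConfig.
from transformers import GPTNeoConfig

# [["global", "local"], 12] expands to 12 repetitions of (global, local), i.e. 24 layers.
config = GPTNeoConfig(num_layers=24, attention_types=[[["global", "local"], 12]])
print(len(config.attention_layers))  # 24, matching num_layers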
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count integer-sided right triangles for each perimeter up to max_perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Return the perimeter <= n with the largest number of right-triangle solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f'''Perimeter {solution()} has maximum solutions''')
code_codestyle: 359
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # each article in the list is a dict
    page = requests.get(_NEWS_API + bbc_news_api_key).json()
    for i, article in enumerate(page['articles'], 1):
        print(f"""{i}.) {article['title']}""")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
style_context_codestyle: 348
label: 0
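A quick sanity check of the perimeter solver above, using the widely cited Project Euler 39 result:

# Hedged check: 840 is the well-known answer for perimeters <= 1000; it admits
# 8 integer right triangles, e.g. (240, 252, 348) and (210, 280, 350).
perimeter, count = pythagorean_triple(1000).most_common(1)[0]
print(perimeter, count)  # expected: 840 8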
__A = "Tobias Carryer" from time import time class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=int(time() ) ) -> List[str]: # noqa: B008 '''simple docstring''' __lowerCamelCase = multiplier __lowerCamelCase = increment __lowerCamelCase = modulo __lowerCamelCase = seed def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. __A = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31) while True: print(lcg.next_number())
code_codestyle: 360
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (map-style or iterable) into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.'
                    )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"""
                )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."""
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."""
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets along rows (axis=0) or columns (axis=1)."""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.'
                    )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    f"""Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"""
                )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."""
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"""Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."""
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
style_context_codestyle: 348
label: 0
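A deterministic spot-check of the linear congruential generator above; fixing the seed makes the stream fully reproducible:

# Hedged check: classic Numerical Recipes parameters, fixed seed for reproducibility.
lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=0)
print(lcg.next_number())  # 1013904223, i.e. (1664525 * 0 + 1013904223) % 2**32
print(lcg.next_number())  # (1664525 * 1013904223 + 1013904223) % 2**32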
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __A = "2.13.1" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("3.7"): raise ImportWarning( "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition." ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n" "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`." ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __A = concatenate_datasets __A = DownloadConfig __A = DownloadManager __A = DownloadMode __A = DownloadConfig __A = DownloadMode __A = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
code_codestyle: 361
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = ["model.decoder.embed_positions.weights"] def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]: """simple docstring""" if "emb" in name: __lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' ) if "transformer" in name: __lowerCamelCase = name.replace('transformer' , 'model.decoder' ) if "cross_attention" in name: __lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' ) if "linear1" in name: __lowerCamelCase = name.replace('linear1' , 'fc1' ) if "linear2" in name: __lowerCamelCase = name.replace('linear2' , 'fc2' ) if "norm1" in name: __lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' ) if "norm_cross" in name: __lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' ) if "norm2" in name: __lowerCamelCase = name.replace('norm2' , 'final_layer_norm' ) if "out_norm" in name: __lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' ) if "linears" in name: __lowerCamelCase = name.replace('linears' , 'lm_heads' ) if "condition_provider.conditioners.description.output_proj" in name: __lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' ) return name def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]: """simple docstring""" __lowerCamelCase = list(state_dict.keys() ) __lowerCamelCase = {} for key in keys: __lowerCamelCase = state_dict.pop(UpperCamelCase__ ) __lowerCamelCase = rename_keys(UpperCamelCase__ ) if "in_proj_weight" in key: # split fused qkv proj __lowerCamelCase = val[:hidden_size, :] __lowerCamelCase = val[hidden_size : 2 * hidden_size, :] __lowerCamelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __lowerCamelCase = val else: __lowerCamelCase = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig: """simple docstring""" if checkpoint == "small": # default config values __lowerCamelCase = 1024 __lowerCamelCase = 24 __lowerCamelCase = 16 elif checkpoint == "medium": __lowerCamelCase = 1536 __lowerCamelCase = 48 __lowerCamelCase = 24 elif checkpoint == "large": __lowerCamelCase = 2048 __lowerCamelCase = 48 __lowerCamelCase = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) __lowerCamelCase = MusicgenDecoderConfig( hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , ) return config @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]: """simple docstring""" __lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ ) __lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ ) __lowerCamelCase = fairseq_model.lm.state_dict() __lowerCamelCase , __lowerCamelCase = rename_state_dict( UpperCamelCase__ , 
hidden_size=decoder_config.hidden_size ) __lowerCamelCase = TaEncoderModel.from_pretrained('t5-base' ) __lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' ) __lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) for key in missing_keys.copy(): if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(UpperCamelCase__ ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model __lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ ) # check we can do a forward pass __lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __lowerCamelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits if logits.shape != (8, 1, 2048): raise ValueError('Incorrect shape for logits' ) # now construct the processor __lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' ) __lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) # set the appropriate bos/pad token ids __lowerCamelCase = 2048 __lowerCamelCase = 2048 # set other default generation config params __lowerCamelCase = int(30 * audio_encoder.config.frame_rate ) __lowerCamelCase = True __lowerCamelCase = 3.0 if pytorch_dump_folder is not None: Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(UpperCamelCase__ ) processor.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) __A = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
style_context_codestyle: 348
label: 0
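A hedged invocation sketch for the MusicGen converter above; `convert_musicgen_checkpoint` is the name its `__main__` block uses, the output folder is a placeholder, and the third positional argument is the optional hub repo id:

# Assumes the conversion function above carries the name used in its __main__ block.
convert_musicgen_checkpoint("small", "./musicgen-small", None)  # checkpoint, dump folder, repo_id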
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __A = logging.get_logger(__name__) __A = { "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json", } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''blip_2_vision_model''' def __init__( self , lowerCamelCase__=1_408 , lowerCamelCase__=6_144 , lowerCamelCase__=39 , lowerCamelCase__=16 , lowerCamelCase__=224 , lowerCamelCase__=14 , lowerCamelCase__="gelu" , lowerCamelCase__=0.0_00_01 , lowerCamelCase__=0.0 , lowerCamelCase__=1e-10 , lowerCamelCase__=True , **lowerCamelCase__ , ) -> str: '''simple docstring''' super().__init__(**__a ) __lowerCamelCase = hidden_size __lowerCamelCase = intermediate_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = patch_size __lowerCamelCase = image_size __lowerCamelCase = initializer_range __lowerCamelCase = attention_dropout __lowerCamelCase = layer_norm_eps __lowerCamelCase = hidden_act __lowerCamelCase = qkv_bias @classmethod def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__a ) __lowerCamelCase , __lowerCamelCase = cls.get_config_dict(__a , **__a ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": __lowerCamelCase = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__a , **__a ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''blip_2_qformer''' def __init__( self , lowerCamelCase__=30_522 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-12 , lowerCamelCase__=0 , lowerCamelCase__="absolute" , lowerCamelCase__=2 , lowerCamelCase__=1_408 , **lowerCamelCase__ , ) -> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=__a , **__a ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = cross_attention_frequency __lowerCamelCase = encoder_hidden_size @classmethod def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__a ) __lowerCamelCase , __lowerCamelCase = cls.get_config_dict(__a , **__a ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": __lowerCamelCase = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__a , **__a ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''blip-2''' snake_case_ = True def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=32 , **lowerCamelCase__ ) -> Any: '''simple docstring''' super().__init__(**__a ) if vision_config is None: __lowerCamelCase = {} logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' ) if qformer_config is None: __lowerCamelCase = {} logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' ) if text_config is None: __lowerCamelCase = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) __lowerCamelCase = BlipaVisionConfig(**__a ) __lowerCamelCase = BlipaQFormerConfig(**__a ) __lowerCamelCase = text_config['model_type'] if 'model_type' in text_config else 'opt' __lowerCamelCase = CONFIG_MAPPING[text_model_type](**__a ) __lowerCamelCase = self.text_config.tie_word_embeddings __lowerCamelCase = self.text_config.is_encoder_decoder __lowerCamelCase = num_query_tokens __lowerCamelCase = self.vision_config.hidden_size __lowerCamelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __lowerCamelCase = 1.0 __lowerCamelCase = 0.02 @classmethod def lowercase_ ( cls , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ , ) -> Tuple: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__a , ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = copy.deepcopy(self.__dict__ ) __lowerCamelCase = self.vision_config.to_dict() __lowerCamelCase = self.qformer_config.to_dict() __lowerCamelCase = self.text_config.to_dict() __lowerCamelCase = self.__class__.model_type return output
code_codestyle: 362
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''sew-d''' def __init__( self , lowerCamelCase__=32 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__=2 , lowerCamelCase__=512 , lowerCamelCase__=256 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=128 , lowerCamelCase__=16 , lowerCamelCase__=True , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ) -> Any: '''simple docstring''' super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ ) __lowerCamelCase = hidden_size __lowerCamelCase = feat_extract_norm __lowerCamelCase = feat_extract_activation __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = conv_bias __lowerCamelCase = num_conv_pos_embeddings __lowerCamelCase = num_conv_pos_embedding_groups __lowerCamelCase = len(self.conv_dim ) __lowerCamelCase = num_hidden_layers __lowerCamelCase = intermediate_size __lowerCamelCase = squeeze_factor __lowerCamelCase = max_position_embeddings __lowerCamelCase = position_buckets __lowerCamelCase = share_att_key __lowerCamelCase = relative_attention __lowerCamelCase = norm_rel_ebd __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = hidden_act __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = feat_proj_dropout __lowerCamelCase = final_dropout __lowerCamelCase = layer_norm_eps __lowerCamelCase = feature_layer_norm_eps __lowerCamelCase = initializer_range __lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks # ctc loss __lowerCamelCase = ctc_loss_reduction __lowerCamelCase = ctc_zero_infinity # sequence classification __lowerCamelCase = use_weighted_layer_sum __lowerCamelCase = classifier_proj_size @property def lowercase_ ( self ) -> Any: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
style_context_codestyle: 348
label: 0
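A composite-config sketch for the BLIP-2 classes above; it assumes they map to `transformers.Blip2VisionConfig`, `Blip2QFormerConfig` and `Blip2Config`, with OPT as the default text backbone:

# Hedged sketch: class names are assumed from the transformers BLIP-2 API.
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

config = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
)
print(config.num_query_tokens)  # 32 by default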
from __future__ import annotations

from random import random


class Node:
    """Treap node: stores a value plus a random heap priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"""'{self.value}: {self.prior:.5}'"""
        else:
            return pformat({f"""{self.value}: {self.prior:.5}""": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + ' '
        left = str(self.left or '')
        right = str(self.right or '')
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (values <= value, values > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every value in `left` must be <= every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` by splitting around it and merging the new node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove every node equal to `value` by splitting it out and re-merging."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted (in-order) order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=',')
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated list of +value / -value commands."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('Unknown command')
    return root


def main() -> None:
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. '
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('good bye!')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
code_codestyle: 363
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __A = logging.get_logger("transformers.models.speecht5") __A = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __A = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __A = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __A = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __A = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __A = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __A = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __A = { 
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __A = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = [] __A = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" for attribute in key.split('.' ): __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ) if weight_type is not None: __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value elif weight_type == "running_mean": __lowerCamelCase = value elif weight_type == "running_var": __lowerCamelCase = value elif weight_type == "num_batches_tracked": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + ('.' 
+ weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any: """simple docstring""" for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = [] if task == "s2t": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2T __lowerCamelCase = IGNORE_KEYS_S2T elif task == "t2s": __lowerCamelCase = None __lowerCamelCase = MAPPING_T2S __lowerCamelCase = IGNORE_KEYS_T2S elif task == "s2s": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2S __lowerCamelCase = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase__ , UpperCamelCase__ ): logger.info(F"""{name} was ignored""" ) continue __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , ) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: __lowerCamelCase = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2] __lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ ) if "weight_g" in name: __lowerCamelCase = 'weight_g' elif "weight_v" in name: __lowerCamelCase = 'weight_v' elif "bias" in name: __lowerCamelCase = 'bias' elif "weight" in name: __lowerCamelCase = 'weight' elif "running_mean" in name: __lowerCamelCase = 'running_mean' elif "running_var" in name: __lowerCamelCase = 'running_var' elif "num_batches_tracked" in name: __lowerCamelCase = 'num_batches_tracked' else: __lowerCamelCase = None set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) continue if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple: """simple docstring""" __lowerCamelCase = full_name.split('conv_layers.' )[-1] __lowerCamelCase = name.split('.' 
) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCamelCase__ ) @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple: """simple docstring""" if config_path is not None: __lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ ) else: __lowerCamelCase = SpeechTaConfig() if task == "s2t": __lowerCamelCase = config.max_text_positions __lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ ) elif task == "t2s": __lowerCamelCase = 1876 __lowerCamelCase = 600 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ ) elif task == "s2s": __lowerCamelCase = 1876 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: __lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it __lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) __lowerCamelCase = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) __lowerCamelCase = SpeechTaFeatureExtractor() __lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) __lowerCamelCase = torch.load(UpperCamelCase__ ) recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if repo_id: print('Pushing to the hub...' 
) processor.push_to_hub(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __A = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
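The ignore lists above rely on the small wildcard matcher `should_ignore`, which the key-mapping loop calls for every checkpoint weight. A minimal standalone sketch of the same matching rules (the example names are illustrative, not part of the converter):

```python
# Sketch of the wildcard matching used by the IGNORE_KEYS lists above:
# "prefix.*" matches any name under that prefix, "prefix.*.suffix" matches
# names containing both parts, and plain keys match as substrings.
def should_ignore(name: str, ignore_keys: list[str]) -> bool:
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):  # keep the trailing dot of the prefix
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


assert should_ignore("text_decoder_prenet.embed_tokens.weight", ["text_decoder_prenet.*"])
assert not should_ignore("speech_encoder_prenet.mask_emb", ["text_decoder_prenet.*"])
```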
import argparse

import torch

from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct the config (default, or from a JSON file if one was given).
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, no edge connects two vertices of the same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every connected component, starting each new component with color 0.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # The graph is bipartite iff no edge joins two vertices of the same color.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
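As a quick illustration of the check, a graph containing an odd cycle cannot be two-colored, while an even cycle can (these calls assume the `check_bipartite_dfs` function above):

```python
# A triangle (odd cycle) is not bipartite: some edge must join two
# vertices of the same color.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False

# A 4-cycle alternates colors cleanly, so it is bipartite.
square = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}
print(check_bipartite_dfs(square))  # True
```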
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid for a vertex if no already-colored neighbour uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case: every vertex has been assigned a color
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
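For illustration, a complete graph on three vertices needs three colors, so asking the backtracker for a 2-coloring fails (a sketch assuming the functions above):

```python
# Adjacency matrix of a triangle: every vertex is adjacent to the other two.
triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
print(color(triangle, 3))  # [0, 1, 2] - a valid 3-coloring
print(color(triangle, 2))  # [] - no 2-coloring exists
```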
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # Verifies that the small checkpoint reproduces a pinned log-likelihood score.
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
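The pinned score works because the model returns the mean cross-entropy per target token; multiplying by the number of target tokens and negating recovers the total log-likelihood of the labels. A tiny sketch of that arithmetic with hypothetical numbers:

```python
# Hypothetical values, only to show the arithmetic behind mtf_score:
# a mean per-token loss of 7.72 over 11 target tokens gives
# log p(labels) = -(11 * 7.72) = -84.92, the scale of EXPECTED_SCORE.
mean_loss_per_token = 7.72
num_target_tokens = 11
print(-(num_target_tokens * mean_loss_per_token))  # -84.92
```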
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class __lowerCAmelCase : """simple docstring""" @property def lowercase_ ( self ) -> int: '''simple docstring''' return self.get_dummy_input() @property def lowercase_ ( self ) -> List[str]: '''simple docstring''' if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f"""\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.""" ) def lowercase_ ( self , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , ) -> Any: '''simple docstring''' __lowerCamelCase = 4 __lowerCamelCase = 32 __lowerCamelCase = (32, 32) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = torch.device(lowerCamelCase__ ) __lowerCamelCase = (batch_size, num_channels) + sizes __lowerCamelCase = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ ) __lowerCamelCase = {'hidden_states': hidden_states} if include_temb: __lowerCamelCase = 128 __lowerCamelCase = randn_tensor((batch_size, temb_channels) , generator=lowerCamelCase__ , device=lowerCamelCase__ ) if include_res_hidden_states_tuple: __lowerCamelCase = torch.manual_seed(1 ) __lowerCamelCase = (randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ ),) if include_encoder_hidden_states: __lowerCamelCase = floats_tensor((batch_size, 32, 32) ).to(lowerCamelCase__ ) if include_skip_sample: __lowerCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCamelCase__ , device=lowerCamelCase__ ) return dummy_input def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = { 'in_channels': 32, 'out_channels': 32, 'temb_channels': 128, } if self.block_type == "up": __lowerCamelCase = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) __lowerCamelCase = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.prepare_init_args_and_inputs_for_common() __lowerCamelCase = self.block_class(**lowerCamelCase__ ) unet_block.to(lowerCamelCase__ ) unet_block.eval() with torch.no_grad(): __lowerCamelCase = unet_block(**lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase = output[0] self.assertEqual(output.shape , self.output_shape ) __lowerCamelCase = output[0, -1, -3:, -3:] __lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ ) assert torch_all_close(output_slice.flatten() , lowerCamelCase__ , atol=5e-3 ) @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' ) def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.prepare_init_args_and_inputs_for_common() __lowerCamelCase = self.block_class(**lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(**lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase = output[0] __lowerCamelCase = torch.device(lowerCamelCase__ ) __lowerCamelCase = randn_tensor(output.shape , device=lowerCamelCase__ ) __lowerCamelCase = torch.nn.functional.mse_loss(lowerCamelCase__ , lowerCamelCase__ ) loss.backward()
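This mixin supplies shared dummy inputs and checks; concrete tests plug in a block class via class attributes. A hedged sketch of how a subclass might look, assuming the mixin above is named `UNetBlockTesterMixin` and that `DownBlock2D` is importable from diffusers (the expected-slice values below are placeholders, not real pinned numbers):

```python
import unittest

from diffusers.models.unet_2d_blocks import DownBlock2D  # assumed import path


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    # The mixin reads these attributes: `block_type` selects the
    # (4, 32, 16, 16) output shape above, and `block_class` is instantiated
    # from prepare_init_args_and_inputs_for_common().
    block_class = DownBlock2D
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0] * 9  # placeholder; a real test pins the actual 3x3 slice
        super().test_output(expected_slice)
```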
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Any: """simple docstring""" __lowerCamelCase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] __lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False __lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False __lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowerCamelCase = [3, 3, 3, 3] __lowerCamelCase = [5, 5, 5, 5] elif "fl4" in model_name: __lowerCamelCase = [4, 4, 4, 4] __lowerCamelCase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowerCamelCase = [3, 3, 3, 3] if "lrf" in model_name: __lowerCamelCase = [3, 3, 3, 3] else: __lowerCamelCase = [2, 2, 2, 2] if "tiny" in model_name: __lowerCamelCase = 96 elif "small" in model_name: __lowerCamelCase = 96 elif "base" in model_name: __lowerCamelCase = 128 elif "large" in model_name: __lowerCamelCase = 192 elif "xlarge" in model_name: __lowerCamelCase = 256 elif "huge" in model_name: __lowerCamelCase = 352 # set label information __lowerCamelCase = 'huggingface/label-files' if "large" in model_name or "huge" in model_name: __lowerCamelCase = 'imagenet-22k-id2label.json' else: __lowerCamelCase = 'imagenet-1k-id2label.json' __lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) __lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = FocalNetConfig( embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , ) return config def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> str: """simple docstring""" if "patch_embed.proj" in name: __lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: __lowerCamelCase = 'encoder.' 
+ name if "encoder.layers" in name: __lowerCamelCase = name.replace('encoder.layers' , 'encoder.stages' ) if "downsample.proj" in name: __lowerCamelCase = name.replace('downsample.proj' , 'downsample.projection' ) if "blocks" in name: __lowerCamelCase = name.replace('blocks' , 'layers' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowerCamelCase = name.replace('modulation.f' , 'modulation.projection_in' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowerCamelCase = name.replace('modulation.h' , 'modulation.projection_context' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowerCamelCase = name.replace('modulation.proj' , 'modulation.projection_out' ) if name == "norm.weight": __lowerCamelCase = 'layernorm.weight' if name == "norm.bias": __lowerCamelCase = 'layernorm.bias' if "head" in name: __lowerCamelCase = name.replace('head' , 'classifier' ) else: __lowerCamelCase = 'focalnet.' + name return name def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Dict: """simple docstring""" __lowerCamelCase = { 'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth', 'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth', 'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth', 'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth', 'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth', 'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth', 'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth', 'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth', 'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth', 'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth', } # fmt: on __lowerCamelCase = model_name_to_url[model_name] print('Checkpoint URL: ' , UpperCamelCase__ ) __lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['model'] # rename keys for key in state_dict.copy().keys(): __lowerCamelCase = state_dict.pop(UpperCamelCase__ ) __lowerCamelCase = val __lowerCamelCase = get_focalnet_config(UpperCamelCase__ ) __lowerCamelCase = FocalNetForImageClassification(UpperCamelCase__ ) model.eval() # load state dict model.load_state_dict(UpperCamelCase__ ) # verify conversion __lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCamelCase = BitImageProcessor( do_resize=UpperCamelCase__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=224 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , ) __lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) __lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' ) __lowerCamelCase = transforms.Compose( [ transforms.Resize(256 
), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ), ] ) __lowerCamelCase = image_transforms(UpperCamelCase__ ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1E-4 ) __lowerCamelCase = model(**UpperCamelCase__ ) __lowerCamelCase = outputs.logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) print('First values of logits:' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] ) elif model_name == "focalnet-tiny-lrf": __lowerCamelCase = torch.tensor([1.16_69, 0.01_25, -0.16_95] ) elif model_name == "focalnet-small": __lowerCamelCase = torch.tensor([0.49_17, -0.04_30, 0.13_41] ) elif model_name == "focalnet-small-lrf": __lowerCamelCase = torch.tensor([-0.25_88, -0.53_42, -0.23_31] ) elif model_name == "focalnet-base": __lowerCamelCase = torch.tensor([-0.16_55, -0.40_90, -0.17_30] ) elif model_name == "focalnet-base-lrf": __lowerCamelCase = torch.tensor([0.53_06, -0.04_83, -0.39_28] ) assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print(F"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(F"""{model_name}""" ) processor.push_to_hub(F"""{model_name}""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="focalnet-tiny", type=str, help="Name of the FocalNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub.", ) __A = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __A = "platform" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __lowerCAmelCase : """simple docstring""" snake_case_ = PegasusConfig snake_case_ = {} snake_case_ = """gelu""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=20 , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=0 , ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = bos_token_id def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __lowerCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __lowerCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __lowerCamelCase = prepare_pegasus_inputs_dict(__lowercase , __lowercase , __lowercase ) return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: '''simple docstring''' __lowerCamelCase = 20 __lowerCamelCase = model_class_name(__lowercase ) __lowerCamelCase = model.encode(inputs_dict['input_ids'] ) __lowerCamelCase = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) __lowerCamelCase = model.init_cache(decoder_input_ids.shape[0] , __lowercase , __lowercase ) __lowerCamelCase = jnp.ones((decoder_input_ids.shape[0], 
max_decoder_length) , dtype='i4' ) __lowerCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowerCamelCase = model.decode( decoder_input_ids[:, :-1] , __lowercase , decoder_attention_mask=__lowercase , past_key_values=__lowercase , decoder_position_ids=__lowercase , ) __lowerCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCamelCase = model.decode( decoder_input_ids[:, -1:] , __lowercase , decoder_attention_mask=__lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowercase , ) __lowerCamelCase = model.decode(__lowercase , __lowercase ) __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = 20 __lowerCamelCase = model_class_name(__lowercase ) __lowerCamelCase = model.encode(inputs_dict['input_ids'] ) __lowerCamelCase = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) __lowerCamelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __lowerCamelCase = model.init_cache(decoder_input_ids.shape[0] , __lowercase , __lowercase ) __lowerCamelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowerCamelCase = model.decode( decoder_input_ids[:, :-1] , __lowercase , decoder_attention_mask=__lowercase , past_key_values=__lowercase , decoder_position_ids=__lowercase , ) __lowerCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCamelCase = model.decode( decoder_input_ids[:, -1:] , __lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowercase , decoder_position_ids=__lowercase , ) __lowerCamelCase = model.decode(__lowercase , __lowercase , decoder_attention_mask=__lowercase ) __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Union[str, Any]=None , ) -> Tuple: """simple docstring""" if attention_mask is None: __lowerCamelCase = np.not_equal(UpperCamelCase__ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __lowerCamelCase = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" snake_case_ = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) snake_case_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () snake_case_ = True snake_case_ = False snake_case_ = False snake_case_ = False def lowercase_ ( self ) -> 
Union[str, Any]: '''simple docstring''' __lowerCamelCase = FlaxPegasusModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__lowercase ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowercase , __lowercase , __lowercase ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowercase , __lowercase , __lowercase ) def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase = self._prepare_for_class(__lowercase , __lowercase ) __lowerCamelCase = model_class(__lowercase ) @jax.jit def encode_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ): return model.encode(input_ids=__lowercase , attention_mask=__lowercase ) with self.subTest('JIT Enabled' ): __lowerCamelCase = encode_jitted(**__lowercase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __lowerCamelCase = encode_jitted(**__lowercase ).to_tuple() self.assertEqual(len(__lowercase ) , len(__lowercase ) ) for jitted_output, output in zip(__lowercase , __lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase = model_class(__lowercase ) __lowerCamelCase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) __lowerCamelCase = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): return model.decode( decoder_input_ids=__lowercase , decoder_attention_mask=__lowercase , encoder_outputs=__lowercase , ) with self.subTest('JIT Enabled' ): __lowerCamelCase = decode_jitted(**__lowercase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __lowerCamelCase = decode_jitted(**__lowercase ).to_tuple() self.assertEqual(len(__lowercase ) , len(__lowercase ) ) for jitted_output, output in zip(__lowercase , __lowercase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self ) -> List[Any]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained('google/pegasus-large' , from_pt=__lowercase ) __lowerCamelCase = np.ones((1, 1) ) __lowerCamelCase = model(__lowercase ) self.assertIsNotNone(__lowercase ) @slow def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' ) __lowerCamelCase = PegasusTokenizer.from_pretrained('google/pegasus-xsum' ) __lowerCamelCase = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. 
The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] __lowerCamelCase = [ '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''', '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''', ] __lowerCamelCase = tokenizer(__lowercase , return_tensors='np' , truncation=__lowercase , max_length=512 , padding=__lowercase ) __lowerCamelCase = model.generate(**__lowercase , num_beams=2 ).sequences __lowerCamelCase = tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase ) assert tgt_text == decoded
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    # Merge and sort, then pick the middle element (odd length) or average
    # the two middle elements (even length).
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
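Sorting the merged arrays makes this O((m + n) log(m + n)); the classic binary-search approach reaches O(log(min(m, n))), but the simple version reads cleanly. A quick worked example of the odd/even branches:

```python
# Odd combined length: the single middle element is the median.
print(median_of_two_arrays([1.0, 3.0], [2.0]))       # 2.0
# Even combined length: average the two middle elements.
print(median_of_two_arrays([1.0, 2.0], [3.0, 4.0]))  # 2.5
```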
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __A = logging.get_logger(__name__) class __lowerCAmelCase ( UpperCamelCase__ ): """simple docstring""" snake_case_ = ["""pixel_values"""] def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = True , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 255 , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Any: '''simple docstring''' super().__init__(**__a ) __lowerCamelCase = size if size is not None else {'height': 224, 'width': 224} __lowerCamelCase = get_size_dict(__a ) __lowerCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224} __lowerCamelCase = get_size_dict(__a , default_to_square=__a , param_name='crop_size' ) __lowerCamelCase = do_resize __lowerCamelCase = do_rescale __lowerCamelCase = do_normalize __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = rescale_factor __lowerCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __lowerCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = get_size_dict(__a ) if "shortest_edge" in size: __lowerCamelCase = get_resize_output_image_size(__a , size=size['shortest_edge'] , default_to_square=__a ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: __lowerCamelCase = (size['height'], size['width']) else: raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Any: '''simple docstring''' __lowerCamelCase = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__a , size=(size['height'], size['width']) , data_format=__a , **__a ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ ) -> Dict: '''simple docstring''' return rescale(__a , scale=__a , data_format=__a , **__a ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple: '''simple docstring''' return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__a , param_name='crop_size' , default_to_square=__a ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__a ) if not is_batched(__a ): __lowerCamelCase = [images] if not valid_images(__a ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. __lowerCamelCase = [to_numpy_array(__a ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__a , size=__a , resample=__a ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__a , size=__a ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__a , __a ) for image in images] __lowerCamelCase = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a )
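The final entry point above (`preprocess` in the original implementation) applies the enabled steps in a fixed order: resize, center-crop, rescale, then channel-wise normalization, all on numpy arrays before the channel-first conversion. A simplified numpy sketch of that pipeline for a single image (not the class itself; resize is omitted for brevity):

```python
import numpy as np


def preprocess_one(image: np.ndarray, mean, std, crop: int = 224) -> np.ndarray:
    h, w = image.shape[:2]
    top, left = (h - crop) // 2, (w - crop) // 2
    image = image[top : top + crop, left : left + crop]  # center crop
    image = image.astype(np.float32) / 255.0             # rescale to [0, 1]
    image = (image - np.array(mean)) / np.array(std)     # normalize per channel
    return image.transpose(2, 0, 1)                      # HWC -> CHW


pixel_values = preprocess_one(
    np.zeros((256, 256, 3), dtype=np.uint8),
    mean=[0.485, 0.456, 0.406],  # IMAGENET_DEFAULT_MEAN
    std=[0.229, 0.224, 0.225],   # IMAGENET_DEFAULT_STD
)
print(pixel_values.shape)  # (3, 224, 224)
```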
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    # Convert via joules: multiply by the source factor, divide by the target factor.
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
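Every factor in the table is the unit's value in joules, so a conversion is just a ratio of two factors; for example, 1 kilowatt-hour is 3_600_000 J. Usage, assuming the conversion function above:

```python
# 1 kilowatthour -> joule: 1 * 3_600_000 / 1.0
print(energy_conversion("kilowatthour", "joule", 1))     # 3600000.0
# 1000 joule -> calorie_nutr: 1000 * 1.0 / 4186.8
print(energy_conversion("joule", "calorie_nutr", 1000))  # ~0.2388
```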
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
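The `_LazyModule` indirection means heavy optional dependencies (sentencepiece, tokenizers) are only imported when one of their symbols is first accessed, while the `TYPE_CHECKING` branch keeps static analyzers happy. A minimal PEP 562 sketch of the same deferred-import idea (illustrative only, not transformers' actual `_LazyModule`):

```python
# module_with_lazy_imports.py -- minimal sketch of a lazily importing package init.
import importlib

_IMPORT_STRUCTURE = {"processing_layoutxlm": ["LayoutXLMProcessor"]}


def __getattr__(name):  # PEP 562: called only for attributes not found normally
    for submodule, symbols in _IMPORT_STRUCTURE.items():
        if name in symbols:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```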
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
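At call time a `PipelineTool` lazily loads its checkpoint and chains `encode`, `forward`, and `decode`. A hedged usage sketch (assumes the transformers tools API is available; the first call downloads the bart-large-cnn-samsum checkpoint):

```python
tool = TextSummarizationTool()
summary = tool(
    "The quarterly report shows revenue grew 12% year over year, driven by "
    "strong demand in the enterprise segment, while costs remained flat."
)
print(summary)
```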
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("0.12.2"): raise Exception("requires fairseq >= 0.12.2") if version.parse(fairseq.__version__) > version.parse("2"): raise Exception("requires fairseq < v2") logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = "Hello, World!" __A = "en_XX" def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ) -> Optional[int]: """simple docstring""" __lowerCamelCase = Path('data_bin' ) __lowerCamelCase = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(__SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(__SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(__SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(__SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , ) xmod.eval() # disable dropout print(__SCREAMING_SNAKE_CASE ) __lowerCamelCase = xmod.model.encoder.sentence_encoder __lowerCamelCase = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: __lowerCamelCase = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print('Our X-MOD config:' , __SCREAMING_SNAKE_CASE ) __lowerCamelCase = XmodForSequenceClassification(__SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(__SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. # Embeddings __lowerCamelCase = xmod_sent_encoder.embed_tokens.weight __lowerCamelCase = xmod_sent_encoder.embed_positions.weight __lowerCamelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. __lowerCamelCase = xmod_sent_encoder.layernorm_embedding.weight __lowerCamelCase = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __lowerCamelCase = model.roberta.encoder.layer[i] __lowerCamelCase = xmod_sent_encoder.layers[i] # self attention __lowerCamelCase = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError('Dimensions of self-attention weights do not match.' 
) __lowerCamelCase = xmod_layer.self_attn.q_proj.weight __lowerCamelCase = xmod_layer.self_attn.q_proj.bias __lowerCamelCase = xmod_layer.self_attn.k_proj.weight __lowerCamelCase = xmod_layer.self_attn.k_proj.bias __lowerCamelCase = xmod_layer.self_attn.v_proj.weight __lowerCamelCase = xmod_layer.self_attn.v_proj.bias # self-attention output __lowerCamelCase = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError('Dimensions of self-attention output weights do not match.' ) __lowerCamelCase = xmod_layer.self_attn.out_proj.weight __lowerCamelCase = xmod_layer.self_attn.out_proj.bias __lowerCamelCase = xmod_layer.self_attn_layer_norm.weight __lowerCamelCase = xmod_layer.self_attn_layer_norm.bias # intermediate __lowerCamelCase = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('Dimensions of intermediate weights do not match.' ) __lowerCamelCase = xmod_layer.fca.weight __lowerCamelCase = xmod_layer.fca.bias # output __lowerCamelCase = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('Dimensions of feed-forward weights do not match.' ) __lowerCamelCase = xmod_layer.fca.weight __lowerCamelCase = xmod_layer.fca.bias __lowerCamelCase = xmod_layer.final_layer_norm.weight __lowerCamelCase = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: __lowerCamelCase = xmod_layer.adapter_layer_norm.weight __lowerCamelCase = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError('Lists of language adapters do not match.' ) for lang_code, adapter in xmod_layer.adapter_modules.items(): __lowerCamelCase = bert_output.adapter_modules[lang_code] __lowerCamelCase = xmod_layer.adapter_modules[lang_code] __lowerCamelCase = from_adapter.fca.weight __lowerCamelCase = from_adapter.fca.bias __lowerCamelCase = from_adapter.fca.weight __lowerCamelCase = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: __lowerCamelCase = xmod_sent_encoder.layer_norm.weight __lowerCamelCase = xmod_sent_encoder.layer_norm.bias if classification_head: __lowerCamelCase = xmod.model.classification_heads["mnli"].dense.weight __lowerCamelCase = xmod.model.classification_heads["mnli"].dense.bias __lowerCamelCase = xmod.model.classification_heads["mnli"].out_proj.weight __lowerCamelCase = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head __lowerCamelCase = xmod.model.encoder.lm_head.dense.weight __lowerCamelCase = xmod.model.encoder.lm_head.dense.bias __lowerCamelCase = xmod.model.encoder.lm_head.layer_norm.weight __lowerCamelCase = xmod.model.encoder.lm_head.layer_norm.bias __lowerCamelCase = xmod.model.encoder.lm_head.weight __lowerCamelCase = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. 
__lowerCamelCase = xmod.encode(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(__SCREAMING_SNAKE_CASE ) __lowerCamelCase = model(__SCREAMING_SNAKE_CASE )[0] if classification_head: __lowerCamelCase = xmod.model.classification_heads["mnli"](xmod.extract_features(__SCREAMING_SNAKE_CASE ) ) else: __lowerCamelCase = xmod.model(__SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) __lowerCamelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 __lowerCamelCase = torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) print('Do both models output the same tensors?' , '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) Path(__SCREAMING_SNAKE_CASE ).mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) __A = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
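The verification step follows the pattern these conversion scripts share: run both implementations on the same input, report the maximum absolute difference, and gate on `torch.allclose`. A generic sketch of that parity check:

```python
import torch


def check_parity(our_output: torch.Tensor, their_output: torch.Tensor, atol: float = 1e-3) -> None:
    # Report the worst-case elementwise deviation, then hard-fail past tolerance.
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    if not torch.allclose(our_output, their_output, atol=atol):
        raise Exception("Something went wRoNg")


check_parity(torch.ones(2, 3), torch.ones(2, 3) + 1e-5)  # passes: diff well under 1e-3
```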
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_choices def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_attention_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 
config_and_inputs __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = True snake_case_ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) __lowerCamelCase = model(lowerCamelCase__ )[0] __lowerCamelCase = [1, 11, 50_265] self.assertEqual(list(output.shape ) , lowerCamelCase__ ) # compare the actual values for a slice. __lowerCamelCase = np.array( [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) __lowerCamelCase = model(lowerCamelCase__ )[0] # compare the actual values for a slice. __lowerCamelCase = np.array( [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
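# Minimal usage sketch (separate from the init file above, assuming a regular
# transformers installation): thanks to _LazyModule, importing the lightweight
# config class never touches the torch-gated modeling submodule, which is only
# materialized on first attribute access.
from transformers import TrOCRConfig

config = TrOCRConfig()      # resolves configuration_trocr on demand
print(config.model_type)    # "trocr"; TrOCRForCausalLM is still unloaded here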
from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: 
'''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> 
Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] )
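# Sketch of the failure mode the dummy classes above implement (illustrative,
# separate from the file). On a transformers install *without* sentencepiece, a
# sentencepiece-backed class such as XLMRobertaTokenizer resolves to one of these
# dummies, and instantiating it raises an ImportError via requires_backends that
# tells the user which backend to install.
from transformers import XLMRobertaTokenizer

try:
    XLMRobertaTokenizer()      # fails fast when the backend is missing
except ImportError as err:
    print(err)                 # names the absent "sentencepiece" backend
except TypeError:
    print("sentencepiece is installed; the real tokenizer expects a vocab file")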
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowercase_ ( self , lowerCamelCase__=0 ) -> int: '''simple docstring''' __lowerCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) ) __lowerCamelCase = np.random.RandomState(lowerCamelCase__ ) __lowerCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) # warmup pass to apply optimizations __lowerCamelCase = pipe(**self.get_dummy_inputs() ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) 
pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def lowercase_ ( self ) -> int: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = ort.SessionOptions() __lowerCamelCase = False return options def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __lowerCamelCase = init_image.resize((768, 512) ) # using the PNDM scheduler by default __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'A fantasy landscape, trending on artstation' __lowerCamelCase = np.random.RandomState(0 ) __lowerCamelCase = pipe( prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images __lowerCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __lowerCamelCase = init_image.resize((768, 512) ) __lowerCamelCase = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' ) __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'A fantasy landscape, trending on artstation' __lowerCamelCase = np.random.RandomState(0 ) __lowerCamelCase = pipe( prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images __lowerCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
__A = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" __A = [{"type": "code", "content": INSTALL_CONTENT}] __A = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __A = logging.get_logger(__name__) __A = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85, 7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77, 13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11, 46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86, 1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91, 1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09, 3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61 ] __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73, 8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27, 32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47, 72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93, 1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75, 2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65, 4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62 ] class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''whisper''' snake_case_ = ['''past_key_values'''] snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str: '''simple docstring''' __lowerCamelCase = vocab_size __lowerCamelCase = num_mel_bins __lowerCamelCase = d_model __lowerCamelCase = encoder_layers __lowerCamelCase = encoder_attention_heads __lowerCamelCase = decoder_layers __lowerCamelCase = decoder_attention_heads __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = encoder_ffn_dim __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = encoder_layerdrop __lowerCamelCase = decoder_layerdrop __lowerCamelCase = 
use_cache __lowerCamelCase = encoder_layers __lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __lowerCamelCase = max_source_positions __lowerCamelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. __lowerCamelCase = classifier_proj_size __lowerCamelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks __lowerCamelCase = median_filter_width super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __lowerCamelCase = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: __lowerCamelCase = {0: 'batch'} else: __lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' ) return common_inputs def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]: '''simple docstring''' __lowerCamelCase = OrderedDict() __lowerCamelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , ) __lowerCamelCase = encoder_inputs['input_features'].shape[2] __lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length __lowerCamelCase = super().generate_dummy_inputs( preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = encoder_inputs.pop('input_features' ) __lowerCamelCase = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: __lowerCamelCase = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def lowercase_ ( self ) -> float: '''simple docstring''' return 1e-3
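# Quick usage sketch for WhisperConfig above (illustrative, not part of the
# original file; assumes transformers and torch are installed). Overriding a few
# of the sizes defined in __init__ yields a deliberately tiny, randomly
# initialized model that is handy for unit tests.
from transformers import WhisperConfig, WhisperModel

tiny_config = WhisperConfig(
    encoder_layers=2,
    decoder_layers=2,
    d_model=64,
    encoder_attention_heads=2,
    decoder_attention_heads=2,
    encoder_ffn_dim=128,
    decoder_ffn_dim=128,
)
tiny_model = WhisperModel(tiny_config)   # random weights, nothing pretrained
print(tiny_model.num_parameters())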
def reverse_words(input_str: str) -> str:
    """
    Return the words of ``input_str`` in reverse order, normalizing runs of
    whitespace to single spaces.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = rotary_dim __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = initializer_range __lowerCamelCase = None __lowerCamelCase = vocab_size - 1 __lowerCamelCase = vocab_size - 1 __lowerCamelCase = vocab_size - 1 def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = 20 __lowerCamelCase = model_class_name(lowerCamelCase__ ) __lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ ) __lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) __lowerCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCamelCase = model( input_ids[:, :-1] , attention_mask=lowerCamelCase__ , 
past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCamelCase = model( input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , ) __lowerCamelCase = model(lowerCamelCase__ ) __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = 20 __lowerCamelCase = model_class_name(lowerCamelCase__ ) __lowerCamelCase = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ ) __lowerCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCamelCase = model( input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCamelCase = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = FlaxGPTJModelTester(self ) def lowercase_ ( self ) -> str: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) @tooslow def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) __lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ ) __lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCamelCase = False __lowerCamelCase = model.config.eos_token_id __lowerCamelCase = jax.jit(model.generate ) __lowerCamelCase = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences __lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , 
skip_special_tokens=lowerCamelCase__ ) __lowerCamelCase = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @is_pt_flax_cross_test def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape __lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase__ ): __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval() __lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa ) __lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ ) __lowerCamelCase = fx_state with torch.no_grad(): __lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple() __lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase__ ) __lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ ) __lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple() self.assertEqual( len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval() __lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa ) __lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params ) __lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape __lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase__ ): __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 0 __lowerCamelCase = 1 # make sure weights are tied in 
PyTorch pt_model.tie_weights() with torch.no_grad(): __lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple() __lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase__ ) __lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ ) with torch.no_grad(): __lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple() self.assertEqual( len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowercase_ ( self ) -> List[str]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ )
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = XLMProphetNetTokenizer snake_case_ = False snake_case_ = True def lowercase_ ( self ) -> int: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase = XLMProphetNetTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = '[PAD]' __lowerCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '[PAD]' ) self.assertEqual(vocab_keys[1] , '[CLS]' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(lowerCamelCase__ ) , 1_012 ) def lowercase_ ( self ) -> str: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_012 ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = XLMProphetNetTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) __lowerCamelCase = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowerCamelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __lowerCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '[UNK]', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '[UNK]', '.', ] , ) @cached_property def lowercase_ ( self ) -> str: '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' ) @slow def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = 'Hello World!' 
__lowerCamelCase = [35_389, 6_672, 49, 2] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
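# Side note (illustrative, not part of the test file above): SPIECE_UNDERLINE is
# the U+2581 block character that SentencePiece prepends to word-initial tokens,
# which is why the expected token lists in the tests carry it as a prefix.
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE

print(repr(SPIECE_UNDERLINE))     # '▁' -- a U+2581 block, not an ASCII underscore
print(SPIECE_UNDERLINE + "This")  # how a word-initial token is spelled in the vocab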
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu __A = False class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase_ ( self ) -> int: '''simple docstring''' return 12 @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' return 12 @property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return 32 @property def lowercase_ ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(lowerCamelCase__ ) @property def lowercase_ ( self ) -> str: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = 12 __lowerCamelCase = 12 __lowerCamelCase = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } __lowerCamelCase = TransformeraDModel(**lowerCamelCase__ ) return model def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.dummy_vqvae __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_transformer __lowerCamelCase = VQDiffusionScheduler(self.num_embed ) __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase__ ) __lowerCamelCase = VQDiffusionPipeline( vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'teddy bear playing in the pool' __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' ) __lowerCamelCase = output.images __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe( [prompt] , generator=lowerCamelCase__ , 
output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowerCamelCase = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.dummy_vqvae __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_transformer __lowerCamelCase = VQDiffusionScheduler(self.num_embed ) __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings( learnable=lowerCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __lowerCamelCase = VQDiffusionPipeline( vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'teddy bear playing in the pool' __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' ) __lowerCamelCase = output.images __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe( [prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowerCamelCase = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) __lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) __lowerCamelCase = pipeline.to(lowerCamelCase__ ) pipeline.set_progress_bar_config(disable=lowerCamelCase__ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipeline( 'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class __lowerCAmelCase(unittest.TestCase):
    """CPU-only smoke tests for accelerate's debug launcher."""

    def test_debug_launcher_script(self) -> None:
        """Run the distributed test script under the CPU debug launcher."""
        debug_launcher(test_script.main)

    def test_debug_launcher_ops(self) -> None:
        """Run the distributed ops checks under the CPU debug launcher."""
        debug_launcher(test_ops.main)
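# Hedged sketch (separate from the tests above) of calling debug_launcher
# directly: it forks a callable across several CPU processes, which makes
# distributed code smoke-testable without GPUs. Treat the exact num_processes
# keyword as an assumption to verify against your installed accelerate version.
from accelerate import Accelerator, debug_launcher


def _worker():
    accelerator = Accelerator()
    print(f"hello from process {accelerator.process_index} of {accelerator.num_processes}")


if __name__ == "__main__":
    debug_launcher(_worker, num_processes=2)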
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = is_training __lowerCamelCase = use_auxiliary_loss __lowerCamelCase = num_queries __lowerCamelCase = num_channels __lowerCamelCase = min_size __lowerCamelCase = max_size __lowerCamelCase = num_labels __lowerCamelCase = hidden_dim __lowerCamelCase = hidden_dim def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCamelCase__ ) __lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ ) __lowerCamelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5 ).float() __lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long() __lowerCamelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerConfig( hidden_size=self.hidden_dim , ) __lowerCamelCase = self.num_queries __lowerCamelCase = self.num_labels __lowerCamelCase = [1, 1, 1, 1] __lowerCamelCase = self.num_channels __lowerCamelCase = 64 __lowerCamelCase = 128 __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim return config def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = output.encoder_hidden_states __lowerCamelCase = output.pixel_decoder_hidden_states __lowerCamelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple: '''simple docstring''' with torch.no_grad(): __lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) 
model.eval() __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() def comm_check_on_output(lowerCamelCase__ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) __lowerCamelCase = model( pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = MaskaFormerModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def lowercase_ ( self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def lowercase_ ( self ) 
-> Tuple: '''simple docstring''' pass @unittest.skip(reason='Mask2Former is not a generative model' ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def lowercase_ ( self ) -> Dict: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' pass def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: __lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = (self.model_tester.min_size,) * 2 __lowerCamelCase = { 'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ), 'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ), 'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(), } __lowerCamelCase = self.model_tester.get_config() __lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' if not self.model_tester.is_training: return __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss loss.backward() def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = 
model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) __lowerCamelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __lowerCamelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCamelCase__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" __lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def lowercase_ ( self ) -> Dict: '''simple docstring''' return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) __lowerCamelCase = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) # masks_queries_logits 
__lowerCamelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) __lowerCamelCase = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] __lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) # class_queries_logits __lowerCamelCase = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) __lowerCamelCase = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) __lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ ) __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']] __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']] with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None )
348
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = 'megatron-bert'

    def __init__(
        self,
        vocab_size=29_056,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
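A minimal usage sketch for the config class above. The top-level `transformers` import path and the override values are assumptions for illustration; inside the repository the class lives in the relative module shown above.

# Hypothetical example: instantiate the config with non-default sizes.
from transformers import MegatronBertConfig

config = MegatronBertConfig(vocab_size=29_056, hidden_size=512, num_hidden_layers=8, num_attention_heads=8)
print(config.hidden_size)  # 512
print(config.position_embedding_type)  # 'absolute'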
355
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/mask2former-swin-small-coco-instance': (
        'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = 'mask2former'
    backbones_supported = ['swin']
    attribute_map = {'hidden_size': 'hidden_dim'}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1_024,
        activation_function: str = 'relu',
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2_048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12_544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
348
0
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ) -> List[Any]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ) -> int: """simple docstring""" __lowerCamelCase = tmp_path / 'cache' __lowerCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCamelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read() _check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ) -> Optional[int]: """simple docstring""" __lowerCamelCase = tmp_path / 'cache' __lowerCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} __lowerCamelCase = features.copy() if features else default_expected_features __lowerCamelCase = ( Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCamelCase = ParquetDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read() _check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ) -> Dict: """simple docstring""" __lowerCamelCase = tmp_path / 'cache' __lowerCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} __lowerCamelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read() _check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Union[str, Any]: """simple docstring""" if issubclass(UpperCamelCase__ , UpperCamelCase__ ): __lowerCamelCase = parquet_path elif issubclass(UpperCamelCase__ , UpperCamelCase__ ): __lowerCamelCase = [parquet_path] __lowerCamelCase = tmp_path / 'cache' __lowerCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} __lowerCamelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read() 
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=("train",) ) -> Union[str, Any]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) for split in splits: __lowerCamelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ) -> List[str]: """simple docstring""" __lowerCamelCase = tmp_path / 'cache' __lowerCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCamelCase = ParquetDatasetReader( {'train': parquet_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read() _check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = tmp_path / 'cache' __lowerCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} __lowerCamelCase = features.copy() if features else default_expected_features __lowerCamelCase = ( Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCamelCase = ParquetDatasetReader({'train': parquet_path} , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read() _check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ) -> List[str]: """simple docstring""" if split: __lowerCamelCase = {split: parquet_path} else: __lowerCamelCase = 'train' __lowerCamelCase = {'train': parquet_path, 'test': parquet_path} __lowerCamelCase = tmp_path / 'cache' __lowerCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} __lowerCamelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read() _check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ) -> Union[str, Any]: """simple docstring""" __lowerCamelCase = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / 'foo.parquet' ) assert writer.write() > 0 __lowerCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' ) __lowerCamelCase = pf.read() assert dataset.data.table == output_table def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowerCamelCase = str(shared_datadir / 'test_image_rgb.jpg' ) __lowerCamelCase = {'image': [image_path]} 
__lowerCamelCase = Features({'image': Image()} ) __lowerCamelCase = Dataset.from_dict(UpperCamelCase__ , features=UpperCamelCase__ ) __lowerCamelCase = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / 'foo.parquet' ) assert writer.write() > 0 __lowerCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) ) assert dataset.features == reloaded_dataset.features __lowerCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=UpperCamelCase__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( 'feature, expected' , [ (Features({'foo': Value('int32' )} ), None), (Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Optional[Any]: """simple docstring""" assert get_writer_batch_size(UpperCamelCase__ ) == expected
356
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = 42 class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple: '''simple docstring''' super().__init__() __lowerCamelCase = num_attention_heads __lowerCamelCase = attention_head_dim __lowerCamelCase = num_attention_heads * attention_head_dim __lowerCamelCase = additional_embeddings __lowerCamelCase = time_embed_dim or inner_dim __lowerCamelCase = embedding_proj_dim or embedding_dim __lowerCamelCase = clip_embed_dim or embedding_dim __lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 ) __lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) if embedding_proj_norm_type is None: __lowerCamelCase = None elif embedding_proj_norm_type == "layer": __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) else: raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) if encoder_hid_proj_type is None: __lowerCamelCase = None elif encoder_hid_proj_type == "linear": __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) else: raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) ) if added_emb_type == "prd": __lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) ) elif added_emb_type is None: __lowerCamelCase = None else: raise ValueError( f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" ) __lowerCamelCase = nn.ModuleList( [ BasicTransformerBlock( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , ) for d in range(lowerCamelCase__ ) ] ) if norm_in_type == "layer": __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) elif norm_in_type is None: __lowerCamelCase = None else: raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" ) __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 ) causal_attention_mask.triu_(1 ) __lowerCamelCase = causal_attention_mask[None, ...] 
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowercase_ ( self ) -> Dict[str, AttentionProcessor]: '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , 'set_processor' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return processors def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count: raise ValueError( f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the""" f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , 'set_processor' ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): module.set_processor(lowerCamelCase__ ) else: module.set_processor(processor.pop(f"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int: '''simple docstring''' __lowerCamelCase = hidden_states.shape[0] __lowerCamelCase = timestep if not torch.is_tensor(lowerCamelCase__ ): __lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0: __lowerCamelCase = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device ) __lowerCamelCase = self.time_proj(lowerCamelCase__ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype ) __lowerCamelCase = self.time_embedding(lowerCamelCase__ ) if self.embedding_proj_norm is not None: __lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ ) __lowerCamelCase = self.embedding_proj(lowerCamelCase__ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' ) __lowerCamelCase = self.proj_in(lowerCamelCase__ ) __lowerCamelCase = self.positional_embedding.to(hidden_states.dtype ) __lowerCamelCase = [] __lowerCamelCase = 0 if encoder_hidden_states is not None: additional_embeds.append(lowerCamelCase__ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __lowerCamelCase = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __lowerCamelCase = hidden_states[:, None, :] __lowerCamelCase = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 ) additional_embeds.append(lowerCamelCase__ ) __lowerCamelCase = torch.cat( lowerCamelCase__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __lowerCamelCase = F.pad( lowerCamelCase__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __lowerCamelCase = hidden_states + positional_embeddings if attention_mask is not None: __lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0 __lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 ) __lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __lowerCamelCase = self.norm_in(lowerCamelCase__ ) for block in self.transformer_blocks: __lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) __lowerCamelCase = self.norm_out(lowerCamelCase__ ) if self.prd_embedding is not None: __lowerCamelCase = hidden_states[:, -1] else: __lowerCamelCase = hidden_states[:, additional_embeddings_len:] __lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]: '''simple docstring''' __lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
348
0
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding='utf-8') as f:
        results = json.load(f)

    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else 'None'
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += ' ' + metric_name + ' |'
            lines += '---|'
            value += val_str + ' |'

        output_md += [title, lines, value, ' ']

    output_md.append('</details>')

    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
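A minimal sketch, run alongside the function above, showing the JSON shape the formatter expects. The file names and metric values are hypothetical; only the standard library is used.

# Hypothetical example: round-trip a small result file through the formatter.
import json
import tempfile
from pathlib import Path

results = {'benchmarks/inference.json': {'latency_ms': {'new': 12.3, 'old': 14.0, 'diff': -1.7}}}
with tempfile.TemporaryDirectory() as tmp:
    src, dst = Path(tmp) / 'results.json', Path(tmp) / 'report.md'
    src.write_text(json.dumps(results), encoding='utf-8')
    format_json_to_md(str(src), str(dst))
    print(dst.read_text(encoding='utf-8'))  # markdown table inside a <details> block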
357
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
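A minimal non-interactive sketch of the function above. The edge weights are hypothetical; note that vertices must be numbered 0..n-1, since the helper lists are indexed by vertex id.

# Hypothetical example: a 4-vertex weighted graph.
from collections import defaultdict

graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (2, 3, 3)]:
    graph[u].append([v, w])
    graph[v].append([u, w])
print(prisms_algorithm(graph))  # [(0, 1), (1, 2), (2, 3)]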
348
0
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
358
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    task: str = field(default='question-answering-extractive', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'question': Value('string'), 'context': Value('string')})
    label_schema: ClassVar[Features] = Features(
        {
            'answers': Sequence(
                {
                    'text': Value('string'),
                    'answer_start': Value('int32'),
                }
            )
        }
    )
    question_column: str = 'question'
    context_column: str = 'context'
    answers_column: str = 'answers'

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: 'question', self.context_column: 'context', self.answers_column: 'answers'}
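A minimal sketch of the template above, assuming the surrounding `datasets` package is importable. The column names `query` and `passage` are hypothetical stand-ins for a dataset whose SQuAD-style columns are named differently.

# Hypothetical example: remap differently named columns onto the task schema.
template = QuestionAnsweringExtractive(question_column='query', context_column='passage')
print(template.column_mapping)  # {'query': 'question', 'passage': 'context', 'answers': 'answers'}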
348
0
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = is_training __lowerCamelCase = use_auxiliary_loss __lowerCamelCase = num_queries __lowerCamelCase = num_channels __lowerCamelCase = min_size __lowerCamelCase = max_size __lowerCamelCase = num_labels __lowerCamelCase = hidden_dim __lowerCamelCase = hidden_dim def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCamelCase__ ) __lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ ) __lowerCamelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5 ).float() __lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long() __lowerCamelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerConfig( hidden_size=self.hidden_dim , ) __lowerCamelCase = self.num_queries __lowerCamelCase = self.num_labels __lowerCamelCase = [1, 1, 1, 1] __lowerCamelCase = self.num_channels __lowerCamelCase = 64 __lowerCamelCase = 128 __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim return config def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = output.encoder_hidden_states __lowerCamelCase = output.pixel_decoder_hidden_states __lowerCamelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple: '''simple docstring''' with torch.no_grad(): __lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) 
model.eval() __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() def comm_check_on_output(lowerCamelCase__ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) __lowerCamelCase = model( pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = MaskaFormerModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def lowercase_ ( self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def lowercase_ ( self ) 
-> Tuple: '''simple docstring''' pass @unittest.skip(reason='Mask2Former is not a generative model' ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def lowercase_ ( self ) -> Dict: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' pass def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: __lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = (self.model_tester.min_size,) * 2 __lowerCamelCase = { 'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ), 'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ), 'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(), } __lowerCamelCase = self.model_tester.get_config() __lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' if not self.model_tester.is_training: return __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss loss.backward() def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = 
model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) __lowerCamelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __lowerCamelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCamelCase__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def lowerCamelCase_ ( ): """simple docstring""" __lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def lowercase_ ( self ) -> Dict: '''simple docstring''' return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) __lowerCamelCase = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) # masks_queries_logits __lowerCamelCase = 
outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) __lowerCamelCase = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] __lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) # class_queries_logits __lowerCamelCase = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) __lowerCamelCase = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) __lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ ) __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']] __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']] with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None )
359
import requests __A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=" def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None: """simple docstring""" __lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page['articles'] , 1 ): print(F"""{i}.) {article['title']}""" ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
348
0
def lowerCamelCase_ ( UpperCamelCase__ : bytes ) -> str: """simple docstring""" return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] ) def lowerCamelCase_ ( UpperCamelCase__ : str ) -> bytes: """simple docstring""" if (len(UpperCamelCase__ ) % 2) != 0: raise ValueError( 'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(UpperCamelCase__ ) <= set('0123456789ABCDEF' ): raise ValueError( 'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(UpperCamelCase__[i] + UpperCamelCase__[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
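# Editor's note: a minimal usage sketch for the two helpers above. The dump
# renames both to `lowerCamelCase_`, so the decoder shadows the encoder; the
# names `base16_encode`/`base16_decode` below follow the original module and
# are assumptions here, not names defined in this dump.
def base16_encode(data: bytes) -> str:
    # one uppercase hex pair per byte, per RFC 3548 section 6
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])

def base16_decode(data: str) -> bytes:
    # consume the string two hex digits (= one byte) at a time
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))

assert base16_encode(b"Hello") == "48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello"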
360
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __A = logging.get_logger(__name__) __A = TypeVar("DatasetType", Dataset, IterableDataset) def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[List[float]] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType: """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase__ ): if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" ) if i == 0: __lowerCamelCase , __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ ) else: return _interleave_iterable_datasets( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ ) def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : int = 0 , ) -> DatasetType: """simple docstring""" if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase__ ): if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' 
) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" ) if i == 0: __lowerCamelCase , __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ ) else: return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
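# Editor's note: a minimal sketch of the two combinators above, assuming the
# public names `interleave_datasets`/`concatenate_datasets` from the original
# `datasets` library (this dump renames both definitions to `lowerCamelCase_`).
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})

# Sample rows 80/20 from the two sources until the first one runs dry.
mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42,
                            stopping_strategy="first_exhausted")
# Append rows end to end (axis=1 would instead add columns side by side).
stacked = concatenate_datasets([d1, d2], axis=0)
assert stacked.num_rows == 6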
348
0
"""simple docstring""" import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''MCTCTFeatureExtractor''' snake_case_ = '''AutoTokenizer''' def __init__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' super().__init__(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = self.feature_extractor __lowerCamelCase = False def __call__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' # For backward compatibility if self._in_target_context_manager: return self.current_processor(*lowerCamelCase__ , **lowerCamelCase__ ) if "raw_speech" in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' ) __lowerCamelCase = kwargs.pop('raw_speech' ) else: __lowerCamelCase = kwargs.pop('audio' , lowerCamelCase__ ) __lowerCamelCase = kwargs.pop('sampling_rate' , lowerCamelCase__ ) __lowerCamelCase = kwargs.pop('text' , lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: __lowerCamelCase = args[0] __lowerCamelCase = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: __lowerCamelCase = self.feature_extractor(lowerCamelCase__ , *lowerCamelCase__ , sampling_rate=lowerCamelCase__ , **lowerCamelCase__ ) if text is not None: __lowerCamelCase = self.tokenizer(lowerCamelCase__ , **lowerCamelCase__ ) if text is None: return inputs elif audio is None: return encodings else: __lowerCamelCase = encodings['input_ids'] return inputs def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*lowerCamelCase__ , **lowerCamelCase__ ) __lowerCamelCase = kwargs.pop('input_features' , lowerCamelCase__ ) __lowerCamelCase = kwargs.pop('labels' , lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: __lowerCamelCase = args[0] __lowerCamelCase = args[1:] if input_features is not None: __lowerCamelCase = self.feature_extractor.pad(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) if labels is not None: __lowerCamelCase = self.tokenizer.pad(lowerCamelCase__ , **lowerCamelCase__ ) if labels is None: return input_features elif input_features is None: return labels else: __lowerCamelCase = labels['input_ids'] return input_features def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]: '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ ) @contextmanager def lowercase_ ( self ) -> int: '''simple docstring''' warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your audio inputs, or in a separate call.' ) __lowerCamelCase = True __lowerCamelCase = self.tokenizer yield __lowerCamelCase = self.feature_extractor __lowerCamelCase = False
361
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = ["model.decoder.embed_positions.weights"] def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]: """simple docstring""" if "emb" in name: __lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' ) if "transformer" in name: __lowerCamelCase = name.replace('transformer' , 'model.decoder' ) if "cross_attention" in name: __lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' ) if "linear1" in name: __lowerCamelCase = name.replace('linear1' , 'fc1' ) if "linear2" in name: __lowerCamelCase = name.replace('linear2' , 'fc2' ) if "norm1" in name: __lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' ) if "norm_cross" in name: __lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' ) if "norm2" in name: __lowerCamelCase = name.replace('norm2' , 'final_layer_norm' ) if "out_norm" in name: __lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' ) if "linears" in name: __lowerCamelCase = name.replace('linears' , 'lm_heads' ) if "condition_provider.conditioners.description.output_proj" in name: __lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' ) return name def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]: """simple docstring""" __lowerCamelCase = list(state_dict.keys() ) __lowerCamelCase = {} for key in keys: __lowerCamelCase = state_dict.pop(UpperCamelCase__ ) __lowerCamelCase = rename_keys(UpperCamelCase__ ) if "in_proj_weight" in key: # split fused qkv proj __lowerCamelCase = val[:hidden_size, :] __lowerCamelCase = val[hidden_size : 2 * hidden_size, :] __lowerCamelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __lowerCamelCase = val else: __lowerCamelCase = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig: """simple docstring""" if checkpoint == "small": # default config values __lowerCamelCase = 1024 __lowerCamelCase = 24 __lowerCamelCase = 16 elif checkpoint == "medium": __lowerCamelCase = 1536 __lowerCamelCase = 48 __lowerCamelCase = 24 elif checkpoint == "large": __lowerCamelCase = 2048 __lowerCamelCase = 48 __lowerCamelCase = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) __lowerCamelCase = MusicgenDecoderConfig( hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , ) return config @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]: """simple docstring""" __lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ ) __lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ ) __lowerCamelCase = fairseq_model.lm.state_dict() __lowerCamelCase , __lowerCamelCase = rename_state_dict( UpperCamelCase__ , 
hidden_size=decoder_config.hidden_size ) __lowerCamelCase = TaEncoderModel.from_pretrained('t5-base' ) __lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' ) __lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) for key in missing_keys.copy(): if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(UpperCamelCase__ ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model __lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ ) # check we can do a forward pass __lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __lowerCamelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits if logits.shape != (8, 1, 2048): raise ValueError('Incorrect shape for logits' ) # now construct the processor __lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' ) __lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) # set the appropriate bos/pad token ids __lowerCamelCase = 2048 __lowerCamelCase = 2048 # set other default generation config params __lowerCamelCase = int(30 * audio_encoder.config.frame_rate ) __lowerCamelCase = True __lowerCamelCase = 3.0 if pytorch_dump_folder is not None: Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(UpperCamelCase__ ) processor.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) __A = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
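# Editor's note: a self-contained illustration of the fused-QKV split done in
# the state-dict renamer above: fairseq stores a single (3*h, h) in_proj_weight,
# which is cut into equal thirds for the separate q/k/v projection weights.
import torch

h = 4
fused = torch.arange(3 * h * h, dtype=torch.float32).reshape(3 * h, h)
q_proj = fused[:h, :]
k_proj = fused[h : 2 * h, :]
v_proj = fused[-h:, :]
assert q_proj.shape == k_proj.shape == v_proj.shape == (h, h)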
348
0
from numpy import exp, pi, sqrt def lowerCamelCase_ ( x : float , mu : float = 0.0 , sigma : float = 1.0 ) -> float: """simple docstring""" return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
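# Editor's note: a quick numeric check of the density above (values rounded);
# for the standard normal (mu = 0, sigma = 1) the peak height is 1/sqrt(2*pi).
from numpy import isclose, pi, sqrt

assert isclose(lowerCamelCase_(0.0), 1 / sqrt(2 * pi))            # ≈ 0.398942
assert isclose(lowerCamelCase_(2.0, 2.0, 3.0), 1 / (3 * sqrt(2 * pi)))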
362
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''sew-d''' def __init__( self , lowerCamelCase__=32 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__=2 , lowerCamelCase__=512 , lowerCamelCase__=256 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=128 , lowerCamelCase__=16 , lowerCamelCase__=True , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ) -> Any: '''simple docstring''' super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ ) __lowerCamelCase = hidden_size __lowerCamelCase = feat_extract_norm __lowerCamelCase = feat_extract_activation __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = conv_bias __lowerCamelCase = num_conv_pos_embeddings __lowerCamelCase = num_conv_pos_embedding_groups __lowerCamelCase = len(self.conv_dim ) __lowerCamelCase = num_hidden_layers __lowerCamelCase = intermediate_size __lowerCamelCase = squeeze_factor __lowerCamelCase = max_position_embeddings __lowerCamelCase = position_buckets __lowerCamelCase = share_att_key __lowerCamelCase = relative_attention __lowerCamelCase = norm_rel_ebd __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = hidden_act __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = feat_proj_dropout __lowerCamelCase = final_dropout __lowerCamelCase = layer_norm_eps __lowerCamelCase = feature_layer_norm_eps __lowerCamelCase = initializer_range __lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks # ctc loss __lowerCamelCase = ctc_loss_reduction __lowerCamelCase = ctc_zero_infinity # sequence classification __lowerCamelCase = use_weighted_layer_sum __lowerCamelCase = classifier_proj_size @property def lowercase_ ( self ) -> Any: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
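# Editor's note: a worked check of the closing property above, which is the
# waveform-to-frame downsampling factor (the product of all conv strides).
# With the default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) this is
# 5 * 2**6 = 320, i.e. one encoder frame per 20 ms of 16 kHz audio.
import functools
import operator

strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, strides, 1) == 320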
348
0
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def lowerCamelCase_ ( UpperCamelCase__ : Namespace ) -> Optional[int]: """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) __A = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n" class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @staticmethod def lowercase_ ( lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = parser.add_parser( 'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , ) train_parser.add_argument('--model_type' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='Model\'s type.' ) train_parser.add_argument( '--tf_checkpoint' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='TensorFlow checkpoint path or folder.' ) train_parser.add_argument( '--pytorch_dump_output' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='Path to the PyTorch saved model output.' ) train_parser.add_argument('--config' , type=lowerCamelCase__ , default='' , help='Configuration file path or folder.' ) train_parser.add_argument( '--finetuning_task_name' , type=lowerCamelCase__ , default=lowerCamelCase__ , help='Optional fine-tuning task name if the TF model was a finetuned model.' , ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = logging.get_logger('transformers-cli/converting' ) self._logger.info(f"""Loading model {model_type}""" ) __lowerCamelCase = model_type __lowerCamelCase = tf_checkpoint __lowerCamelCase = pytorch_dump_output __lowerCamelCase = config __lowerCamelCase = finetuning_task_name def lowercase_ ( self ) -> int: '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(lowerCamelCase__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( 
convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__ ) if "ckpt" in self._tf_checkpoint.lower(): __lowerCamelCase = self._tf_checkpoint __lowerCamelCase = '' else: __lowerCamelCase = self._tf_checkpoint __lowerCamelCase = '' convert_transfo_xl_checkpoint_to_pytorch( lowerCamelCase__ , self._config , self._pytorch_dump_output , lowerCamelCase__ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
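# Editor's note: the subcommand above is driven from the shell; an illustrative
# invocation (paths are placeholders, not taken from the source) looks like:
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint /path/to/bert_model.ckpt \
#       --config /path/to/bert_config.json \
#       --pytorch_dump_output /path/to/pytorch_dump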
363
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __A = logging.get_logger("transformers.models.speecht5") __A = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __A = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __A = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __A = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __A = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __A = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __A = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __A = { 
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __A = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = [] __A = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" for attribute in key.split('.' ): __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ) if weight_type is not None: __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value elif weight_type == "running_mean": __lowerCamelCase = value elif weight_type == "running_var": __lowerCamelCase = value elif weight_type == "num_batches_tracked": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + ('.' 
+ weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any: """simple docstring""" for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = [] if task == "s2t": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2T __lowerCamelCase = IGNORE_KEYS_S2T elif task == "t2s": __lowerCamelCase = None __lowerCamelCase = MAPPING_T2S __lowerCamelCase = IGNORE_KEYS_T2S elif task == "s2s": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2S __lowerCamelCase = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase__ , UpperCamelCase__ ): logger.info(F"""{name} was ignored""" ) continue __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , ) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: __lowerCamelCase = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2] __lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ ) if "weight_g" in name: __lowerCamelCase = 'weight_g' elif "weight_v" in name: __lowerCamelCase = 'weight_v' elif "bias" in name: __lowerCamelCase = 'bias' elif "weight" in name: __lowerCamelCase = 'weight' elif "running_mean" in name: __lowerCamelCase = 'running_mean' elif "running_var" in name: __lowerCamelCase = 'running_var' elif "num_batches_tracked" in name: __lowerCamelCase = 'num_batches_tracked' else: __lowerCamelCase = None set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) continue if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple: """simple docstring""" __lowerCamelCase = full_name.split('conv_layers.' )[-1] __lowerCamelCase = name.split('.' 
) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCamelCase__ ) @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple: """simple docstring""" if config_path is not None: __lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ ) else: __lowerCamelCase = SpeechTaConfig() if task == "s2t": __lowerCamelCase = config.max_text_positions __lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ ) elif task == "t2s": __lowerCamelCase = 1876 __lowerCamelCase = 600 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ ) elif task == "s2s": __lowerCamelCase = 1876 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: __lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it __lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) __lowerCamelCase = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) __lowerCamelCase = SpeechTaFeatureExtractor() __lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) __lowerCamelCase = torch.load(UpperCamelCase__ ) recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if repo_id: print('Pushing to the hub...' 
) processor.push_to_hub(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __A = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
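# Editor's note: a standalone sketch of the ignore-pattern matcher used by the
# weight loader above (named `should_ignore` in the original script; this dump
# defines it as `lowerCamelCase_` but calls it by the original name). Trailing
# ".*" is a prefix match, ".*." splits into a prefix/suffix pair, anything
# else is a plain substring match.
def should_ignore(name: str, ignore_keys: list) -> bool:
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False

assert should_ignore("text_encoder_prenet.embed_tokens.weight", ["text_encoder_prenet.*"])
assert should_ignore("encoder.proj.weight", ["encoder.proj"])
assert not should_ignore("decoder.layers.0.fc1.weight", ["encoder.*"])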
348
0
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __A = True except ImportError: __A = False __A = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCamelCase_ ( UpperCamelCase__ : Namespace ) -> List[str]: """simple docstring""" return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @staticmethod def lowercase_ ( lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = parser.add_parser('add-new-model' ) add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' ) add_new_model_parser.add_argument('--testing_file' , type=lowerCamelCase__ , help='Configuration file on which to run.' ) add_new_model_parser.add_argument( '--path' , type=lowerCamelCase__ , help='Path to cookiecutter. Should only be used for testing purposes.' ) add_new_model_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , *lowerCamelCase__ ) -> List[Any]: '''simple docstring''' __lowerCamelCase = testing __lowerCamelCase = testing_file __lowerCamelCase = path def lowercase_ ( self ) -> List[Any]: '''simple docstring''' warnings.warn( 'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ' 'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ' 'checks, you should use `transformers-cli add-new-model-like` instead.' ) if not _has_cookiecutter: raise ImportError( 'Model creation dependencies are required to use the `add_new_model` command. Install them by running ' 'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory __lowerCamelCase = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]] if len(lowerCamelCase__ ) > 0: raise ValueError( 'Several directories starting with `cookiecutter-template-` in current working directory. ' 'Please clean your directory by removing all folders starting with `cookiecutter-template-` or ' 'change your working directory.' 
) __lowerCamelCase = ( Path(lowerCamelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) __lowerCamelCase = path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(lowerCamelCase__ ) ) else: with open(self._testing_file , 'r' ) as configuration_file: __lowerCamelCase = json.load(lowerCamelCase__ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCamelCase__ , extra_context=lowerCamelCase__ , ) __lowerCamelCase = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0] # Retrieve configuration with open(directory + '/configuration.json' , 'r' ) as configuration_file: __lowerCamelCase = json.load(lowerCamelCase__ ) __lowerCamelCase = configuration['lowercase_modelname'] __lowerCamelCase = configuration['generate_tensorflow_pytorch_and_flax'] os.remove(f"""{directory}/configuration.json""" ) __lowerCamelCase = 'PyTorch' in generate_tensorflow_pytorch_and_flax __lowerCamelCase = 'TensorFlow' in generate_tensorflow_pytorch_and_flax __lowerCamelCase = 'Flax' in generate_tensorflow_pytorch_and_flax __lowerCamelCase = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}""" os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=lowerCamelCase__ ) # Tests require submodules as they have parent imports with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w' ): pass shutil.move( f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , ) shutil.move( f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , ) def remove_copy_lines(lowerCamelCase__ ): with open(lowerCamelCase__ , 'r' ) as f: __lowerCamelCase = f.readlines() with open(lowerCamelCase__ , 'w' ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(lowerCamelCase__ ) if output_pytorch: if not self._testing: remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" ) if output_tensorflow: if not self._testing: remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" ) if output_flax: if not self._testing: remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , ) else: os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" ) os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" ) shutil.move( f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , ) shutil.move( f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , ) shutil.move( f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # Create temp file __lowerCamelCase , __lowerCamelCase = mkstemp() __lowerCamelCase = False with fdopen(lowerCamelCase__ , 'w' ) as new_file: with open(lowerCamelCase__ ) as old_file: for line in old_file: new_file.write(lowerCamelCase__ ) if line_to_copy_below in line: __lowerCamelCase = True for line_to_copy in lines_to_copy: new_file.write(lowerCamelCase__ ) if not line_found: raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" ) # Copy the file permissions from the old file to the new file copymode(lowerCamelCase__ , lowerCamelCase__ ) # Remove original file remove(lowerCamelCase__ ) # Move new file move(lowerCamelCase__ , lowerCamelCase__ ) def skip_units(lowerCamelCase__ ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(lowerCamelCase__ ): with open(lowerCamelCase__ ) as datafile: __lowerCamelCase = [] __lowerCamelCase = False __lowerCamelCase = False for line in datafile: if "# To replace in: " in line and "##" not in line: __lowerCamelCase = 
line.split('"' )[1] __lowerCamelCase = skip_units(lowerCamelCase__ ) elif "# Below: " in line and "##" not in line: __lowerCamelCase = line.split('"' )[1] __lowerCamelCase = skip_units(lowerCamelCase__ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = [] elif "# Replace with" in line and "##" not in line: __lowerCamelCase = [] elif "##" not in line: lines_to_copy.append(lowerCamelCase__ ) remove(lowerCamelCase__ ) replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" ) os.rmdir(lowerCamelCase__ )
364
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowerCamelCase_ ( graph : dict ) -> bool: """simple docstring""" visited = [False] * len(graph ) color = [-1] * len(graph ) def dfs(v : int , c : int ): visited[v] = True color[v] = c for u in graph[v]: if not visited[u]: dfs(u , 1 - c ) for i in range(len(graph ) ): if not visited[i]: dfs(i , 0 ) for i in range(len(graph ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph __A = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(lowerCamelCase_(__A))
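# Editor's note: a quick counterexample for the checker above; an odd cycle
# (a triangle) is the canonical non-bipartite graph, so the check must fail,
# while the even cycle stored in `__A` above passes.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
assert lowerCamelCase_(triangle) is False
assert lowerCamelCase_(__A) is True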
348
0
import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn

from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert a Bort checkpoint (MXNet/Gluonnlp) to the Transformers BERT structure."""
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter                                                | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta`                                      | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma`                                     | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight`                                      | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight`                                          | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias`     | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight`   | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias`   | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias`   | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias`                   | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight`                 | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta`                  | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma`                 | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias`                   | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight`                 | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta`              | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma`             | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias`                        | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight`                      | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path to the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
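# A minimal sanity-check sketch (not part of the original script): it assumes the conversion above was
# run and dumped a checkpoint to a local folder. The dump path below is a placeholder.
import torch
from transformers import BertModel, RobertaTokenizer


def sanity_check_converted_bort(dump_folder: str = "./bort-converted") -> None:
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")  # Bort reuses the RoBERTa vocabulary
    model = BertModel.from_pretrained(dump_folder)
    model.eval()
    inputs = tokenizer("The Nymphenburg Palace is a beautiful palace in Munich!", return_tensors="pt")
    with torch.no_grad():
        hidden_states = model(**inputs).last_hidden_state
    # the hidden size should match predefined_args["embed_size"] == 1024
    print(hidden_states.shape)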
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Pix2StructProcessor(ProcessorMixin):
    r"""
    Constructs a Pix2Struct processor which wraps a T5 tokenizer and a Pix2Struct image processor into a single
    processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares image(s) with the image processor and text with the tokenizer. For non-VQA checkpoints the text
        encoding is returned under `decoder_input_ids`/`decoder_attention_mask`.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
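# A hedged usage sketch for the processor above (not from the original module). The checkpoint id and
# image URL are illustrative; for non-VQA checkpoints the text lands in the decoder_* fields, as
# implemented in __call__ above.
import requests
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")  # illustrative checkpoint
image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
inputs = processor(images=image, text="A stop sign", return_tensors="pt")
# expect image features (flattened patches + mask) plus decoder_input_ids / decoder_attention_mask
print(sorted(inputs.keys()))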
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
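# A short, hedged check of a converted checkpoint (not part of the original script). The Hub id below
# assumes the converted FocalNet weights were pushed as described above.
from transformers import FocalNetForImageClassification

model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")  # assumed converted checkpoint
print(model.config.embed_dim, model.config.depths)  # expected for "tiny": 96 and [2, 2, 6, 2]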
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __A = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") __A = ( subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("utf-8").split() ) __A = "|".join(sys.argv[1:]) __A = re.compile(Rf'''^({joined_dirs}).*?\.py$''') __A = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined, sorted contents of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
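# Two worked calls, added for illustration: an odd combined length returns the middle element, an even
# combined length averages the two middle elements.
assert median_of_two_arrays([1.0, 2.0], [3.0]) == 2.0  # sorted: [1, 2, 3] -> middle element
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # sorted: [1, 2, 3, 4] -> (2 + 3) / 2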
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attn_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between energy units by scaling through the joule factors above."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
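# A few worked conversions for illustration; each is value * ENERGY_CONVERSION[from] / ENERGY_CONVERSION[to].
assert energy_conversion("joule", "kilojoule", 1000) == 1.0  # 1000 J = 1 kJ
assert energy_conversion("watthour", "joule", 1) == 3600.0  # 1 Wh = 3600 J
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0  # 1 kWh = 3.6 MJ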
import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
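# A minimal usage sketch, assuming the tools runtime is available in the installed transformers version;
# the import path and input text below are illustrative.
from transformers.tools import TextSummarizationTool  # assumed module path

summarizer = TextSummarizationTool()
summary = summarizer(
    "Hugging Face maintains open-source libraries for machine learning, including the transformers "
    "library, which hosts thousands of pretrained models."
)
print(summary)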
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]

        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from ..utils import DummyObject, requires_backends


# The original dummy-objects file repeats the class below verbatim, once per sentencepiece-backed class;
# the individual class names were stripped from this copy and cannot be recovered, so the duplicated
# definitions are collapsed into a single representative placeholder (the class name here is a stand-in).
class SentencePieceBackedPlaceholder(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
from sklearn.metrics import matthews_corrcoef

import datasets


_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54

    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1

    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
          and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
          and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
          Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
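# A short usage sketch mirroring the first docstring example above (requires the datasets library):
import datasets

matthews_metric = datasets.load_metric("matthews_correlation")
results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3])
print(round(results["matthews_correlation"], 2))  # 0.54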
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowercase_ ( self , lowerCamelCase__=0 ) -> int: '''simple docstring''' __lowerCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) ) __lowerCamelCase = np.random.RandomState(lowerCamelCase__ ) __lowerCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) # warmup pass to apply optimizations __lowerCamelCase = pipe(**self.get_dummy_inputs() ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) 
pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def lowercase_ ( self ) -> int: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = ort.SessionOptions() __lowerCamelCase = False return options def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __lowerCamelCase = init_image.resize((768, 512) ) # using the PNDM scheduler by default __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'A fantasy landscape, trending on artstation' __lowerCamelCase = np.random.RandomState(0 ) __lowerCamelCase = pipe( prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images __lowerCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __lowerCamelCase = init_image.resize((768, 512) ) __lowerCamelCase = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' ) __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'A fantasy landscape, trending on artstation' __lowerCamelCase = np.random.RandomState(0 ) __lowerCamelCase = pipe( prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images __lowerCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
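Outside the test harness, the pattern these tests exercise — load the ONNX pipeline, swap the scheduler via `from_config`, and drive it with a seeded `np.random.RandomState` — reduces to the sketch below. The checkpoint id is the tiny test model used above; upstream diffusers spells the class `OnnxStableDiffusionImg2ImgPipeline`, and the obfuscated `__lowerCamelCase = …` assignments correspond to `pipe.scheduler = …`:

import random

import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline
from diffusers.utils import floats_tensor

pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
    provider="CPUExecutionProvider",
)
# Swap the default scheduler while keeping its configuration.
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
image = floats_tensor((1, 3, 128, 128), rng=random.Random(0))
out = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=image,
    generator=np.random.RandomState(0),
    num_inference_steps=3,
    strength=0.75,
    guidance_scale=7.5,
    output_type="numpy",
)
print(out.images.shape)  # (1, 128, 128, 3)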
348
0
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = (DDPMParallelScheduler,) def lowercase_ ( self , **lowerCamelCase__ ) -> List[Any]: '''simple docstring''' __lowerCamelCase = { 'num_train_timesteps': 1_000, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**lowerCamelCase__ ) return config def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ ) def lowercase_ ( self ) -> int: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowerCamelCase__ ) def lowercase_ ( self ) -> Any: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' self.check_over_configs(thresholding=lowerCamelCase__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowerCamelCase__ , prediction_type=lowerCamelCase__ , sample_max_value=lowerCamelCase__ , ) def lowercase_ ( self ) -> str: '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase__ ) def lowercase_ ( self ) -> Any: '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) __lowerCamelCase = len(lowerCamelCase__ ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter __lowerCamelCase = self.dummy_sample_deter + 0.1 __lowerCamelCase = self.dummy_sample_deter - 0.1 __lowerCamelCase = samplea.shape[0] __lowerCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) __lowerCamelCase = torch.arange(lowerCamelCase__ )[0:3, None].repeat(1 , lowerCamelCase__ ) __lowerCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __lowerCamelCase = scheduler.batch_step_no_noise(lowerCamelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) __lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) ) __lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2 assert abs(result_mean.item() - 0.50_05 ) < 1e-3 def lowercase_ ( 
self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) __lowerCamelCase = len(lowerCamelCase__ ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter __lowerCamelCase = torch.manual_seed(0 ) for t in reversed(range(lowerCamelCase__ ) ): # 1. predict noise residual __lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ ) # 2. predict previous mean of sample x_t-1 __lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ ).prev_sample __lowerCamelCase = pred_prev_sample __lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) ) __lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1e-2 assert abs(result_mean.item() - 0.33_72 ) < 1e-3 def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config(prediction_type='v_prediction' ) __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) __lowerCamelCase = len(lowerCamelCase__ ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter __lowerCamelCase = torch.manual_seed(0 ) for t in reversed(range(lowerCamelCase__ ) ): # 1. predict noise residual __lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ ) # 2. predict previous mean of sample x_t-1 __lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ ).prev_sample __lowerCamelCase = pred_prev_sample __lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) ) __lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1e-2 assert abs(result_mean.item() - 0.26_31 ) < 1e-3 def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) __lowerCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowerCamelCase__ ) __lowerCamelCase = scheduler.timesteps for i, timestep in enumerate(lowerCamelCase__ ): if i == len(lowerCamelCase__ ) - 1: __lowerCamelCase = -1 else: __lowerCamelCase = timesteps[i + 1] __lowerCamelCase = scheduler.previous_timestep(lowerCamelCase__ ) __lowerCamelCase = prev_t.item() self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) __lowerCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(lowerCamelCase__ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) __lowerCamelCase = [100, 87, 50, 1, 0] __lowerCamelCase = len(lowerCamelCase__ ) with self.assertRaises(lowerCamelCase__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' 
): scheduler.set_timesteps(num_inference_steps=lowerCamelCase__ , timesteps=lowerCamelCase__ ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) __lowerCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCamelCase__ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ): scheduler.set_timesteps(timesteps=lowerCamelCase__ )
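The custom-timesteps contract these last tests pin down can be shown end to end in a few lines (a sketch assuming the public `diffusers` API: `timesteps` must be strictly descending and is mutually exclusive with `num_inference_steps`):

from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1_000, beta_schedule="linear")
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # valid: strictly descending
# previous_timestep walks the custom schedule and returns -1 after the last step.
for t, expected_prev in zip(scheduler.timesteps, [87, 50, 1, 0, -1]):
    assert scheduler.previous_timestep(t).item() == expected_prev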
351
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __A = logging.get_logger(__name__) __A = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85, 7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77, 13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11, 46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86, 1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91, 1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09, 3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61 ] __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73, 8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27, 32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47, 72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93, 1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75, 2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65, 4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62 ] class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''whisper''' snake_case_ = ['''past_key_values'''] snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str: '''simple docstring''' __lowerCamelCase = vocab_size __lowerCamelCase = num_mel_bins __lowerCamelCase = d_model __lowerCamelCase = encoder_layers __lowerCamelCase = encoder_attention_heads __lowerCamelCase = decoder_layers __lowerCamelCase = decoder_attention_heads __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = encoder_ffn_dim __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = encoder_layerdrop __lowerCamelCase = decoder_layerdrop __lowerCamelCase = 
use_cache __lowerCamelCase = encoder_layers __lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __lowerCamelCase = max_source_positions __lowerCamelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. __lowerCamelCase = classifier_proj_size __lowerCamelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks __lowerCamelCase = median_filter_width super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __lowerCamelCase = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: __lowerCamelCase = {0: 'batch'} else: __lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' ) return common_inputs def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]: '''simple docstring''' __lowerCamelCase = OrderedDict() __lowerCamelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , ) __lowerCamelCase = encoder_inputs['input_features'].shape[2] __lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length __lowerCamelCase = super().generate_dummy_inputs( preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = encoder_inputs.pop('input_features' ) __lowerCamelCase = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: __lowerCamelCase = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def lowercase_ ( self ) -> float: '''simple docstring''' return 1e-3
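As a standalone sketch of how this configuration behaves (upstream the class is `WhisperConfig`; the generic attribute names are routed through the `attribute_map` shown above):

from transformers import WhisperConfig

config = WhisperConfig()  # defaults above: 51_865-token vocab, 80 mel bins
assert config.model_type == "whisper"
assert config.num_mel_bins == 80
# attribute_map aliases the generic names onto the Whisper-specific ones.
assert config.hidden_size == config.d_model
assert config.num_attention_heads == config.encoder_attention_heads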
348
0
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = SpeechTaTokenizer snake_case_ = False snake_case_ = True def lowercase_ ( self ) -> List[str]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase = SpeechTaTokenizer(lowerCamelCase__ ) __lowerCamelCase = AddedToken('<mask>' , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) __lowerCamelCase = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = 'this is a test' __lowerCamelCase = 'this is a test' return input_text, output_text def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=20 , lowerCamelCase__=5 ) -> Dict: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.get_input_output_texts(lowerCamelCase__ ) __lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) __lowerCamelCase = tokenizer.decode(lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ ) return text, ids def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = '<pad>' __lowerCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-4] , 'œ' ) self.assertEqual(vocab_keys[-2] , '<mask>' ) self.assertEqual(vocab_keys[-1] , '<ctc_blank>' ) self.assertEqual(len(lowerCamelCase__ ) , 81 ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowerCamelCase = tokenizer.vocab_size __lowerCamelCase = len(lowerCamelCase__ ) self.assertNotEqual(lowerCamelCase__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) __lowerCamelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd'] __lowerCamelCase = tokenizer.add_tokens(lowerCamelCase__ ) __lowerCamelCase = tokenizer.vocab_size __lowerCamelCase = len(lowerCamelCase__ ) self.assertNotEqual(lowerCamelCase__ , 0 ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , len(lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ , all_size + len(lowerCamelCase__ ) ) __lowerCamelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , 
add_special_tokens=lowerCamelCase__ ) self.assertGreaterEqual(len(lowerCamelCase__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) __lowerCamelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} __lowerCamelCase = tokenizer.add_special_tokens(lowerCamelCase__ ) __lowerCamelCase = tokenizer.vocab_size __lowerCamelCase = len(lowerCamelCase__ ) self.assertNotEqual(lowerCamelCase__ , 0 ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , len(lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ , all_size_a + len(lowerCamelCase__ ) ) __lowerCamelCase = tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowerCamelCase__ ) self.assertGreaterEqual(len(lowerCamelCase__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowercase_ ( self ) -> str: '''simple docstring''' pass def lowercase_ ( self ) -> int: '''simple docstring''' pass def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = tokenizer.tokenize('This is a test' ) # fmt: off self.assertListEqual(lowerCamelCase__ , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) __lowerCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowerCamelCase__ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) # fmt: off self.assertListEqual(lowerCamelCase__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on __lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) @slow def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = [ 'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ' 'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ' 'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ' 'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.', 'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ' 'conditioning on both left and right context in all layers.', 'The quick brown fox jumps over the lazy dog.', ] # fmt: off __lowerCamelCase = { 'input_ids': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=lowerCamelCase__ , )
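A hedged round-trip sketch of the tokenizer exercised above (the checkpoint id comes from the integration test; upstream the class is `SpeechT5Tokenizer`, and it requires the `sentencepiece` package):

from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
ids = tokenizer.encode("this is a test")  # character-level SentencePiece ids, eos appended
print(tokenizer.convert_ids_to_tokens(ids))             # '▁'-prefixed characters, then '</s>'
print(tokenizer.decode(ids, skip_special_tokens=True))  # "this is a test"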
352
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = rotary_dim __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = initializer_range __lowerCamelCase = None __lowerCamelCase = vocab_size - 1 __lowerCamelCase = vocab_size - 1 __lowerCamelCase = vocab_size - 1 def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = 20 __lowerCamelCase = model_class_name(lowerCamelCase__ ) __lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ ) __lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) __lowerCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCamelCase = model( input_ids[:, :-1] , attention_mask=lowerCamelCase__ , 
past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCamelCase = model( input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , ) __lowerCamelCase = model(lowerCamelCase__ ) __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = 20 __lowerCamelCase = model_class_name(lowerCamelCase__ ) __lowerCamelCase = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ ) __lowerCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCamelCase = model( input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCamelCase = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = FlaxGPTJModelTester(self ) def lowercase_ ( self ) -> str: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) @tooslow def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) __lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ ) __lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCamelCase = False __lowerCamelCase = model.config.eos_token_id __lowerCamelCase = jax.jit(model.generate ) __lowerCamelCase = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences __lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , 
skip_special_tokens=lowerCamelCase__ ) __lowerCamelCase = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @is_pt_flax_cross_test def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape __lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase__ ): __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval() __lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa ) __lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ ) __lowerCamelCase = fx_state with torch.no_grad(): __lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple() __lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase__ ) __lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ ) __lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple() self.assertEqual( len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval() __lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa ) __lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params ) __lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape __lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase__ ): __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 0 __lowerCamelCase = 1 # make sure weights are tied in 
PyTorch pt_model.tie_weights() with torch.no_grad(): __lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple() __lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase__ ) __lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ ) with torch.no_grad(): __lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple() self.assertEqual( len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowercase_ ( self ) -> List[str]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ )
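Stripped of the test scaffolding, the slow generation check above amounts to the following sketch (same tokenizer settings and checkpoint; `jax.jit` compiles `model.generate` once, and the obfuscated assignments before it correspond to `model.do_sample = False` and padding with the eos token):

import jax
from transformers import FlaxGPTJForCausalLM, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
inputs = tokenizer(
    ["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True
)
model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
model.do_sample = False
model.config.pad_token_id = model.config.eos_token_id
jit_generate = jax.jit(model.generate)  # compiled once, reused for every batch
sequences = jit_generate(
    inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))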
348
0
import math
from typing import Any, Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__A = logging.get_logger(__name__)

__A = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class __lowerCAmelCase ( __magic_name__ ):
    """simple docstring"""

    snake_case_ = '''encodec'''

    def __init__(
        self ,
        lowerCamelCase__=[1.5, 3.0, 6.0, 12.0, 24.0] ,
        lowerCamelCase__=24_000 ,
        lowerCamelCase__=1 ,
        lowerCamelCase__=False ,
        lowerCamelCase__=None ,
        lowerCamelCase__=None ,
        lowerCamelCase__=128 ,
        lowerCamelCase__=32 ,
        lowerCamelCase__=1 ,
        lowerCamelCase__=[8, 5, 4, 2] ,
        lowerCamelCase__="weight_norm" ,
        lowerCamelCase__=7 ,
        lowerCamelCase__=7 ,
        lowerCamelCase__=3 ,
        lowerCamelCase__=2 ,
        lowerCamelCase__=True ,
        lowerCamelCase__="reflect" ,
        lowerCamelCase__=2 ,
        lowerCamelCase__=2 ,
        lowerCamelCase__=1.0 ,
        lowerCamelCase__=1_024 ,
        lowerCamelCase__=None ,
        lowerCamelCase__=True ,
        **lowerCamelCase__ ,
    ) -> Any:
        '''simple docstring'''
        __lowerCamelCase = target_bandwidths
        __lowerCamelCase = sampling_rate
        __lowerCamelCase = audio_channels
        __lowerCamelCase = normalize
        __lowerCamelCase = chunk_length_s
        __lowerCamelCase = overlap
        __lowerCamelCase = hidden_size
        __lowerCamelCase = num_filters
        __lowerCamelCase = num_residual_layers
        __lowerCamelCase = upsampling_ratios
        __lowerCamelCase = norm_type
        __lowerCamelCase = kernel_size
        __lowerCamelCase = last_kernel_size
        __lowerCamelCase = residual_kernel_size
        __lowerCamelCase = dilation_growth_rate
        __lowerCamelCase = use_causal_conv
        __lowerCamelCase = pad_mode
        __lowerCamelCase = compress
        __lowerCamelCase = num_lstm_layers
        __lowerCamelCase = trim_right_ratio
        __lowerCamelCase = codebook_size
        __lowerCamelCase = codebook_dim if codebook_dim is not None else hidden_size
        __lowerCamelCase = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`, got {self.norm_type}"""
            )
        super().__init__(**lowerCamelCase__ )

    @property
    def lowercase_ ( self ) -> Optional[int]:
        '''simple docstring'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def lowercase_ ( self ) -> Optional[int]:
        '''simple docstring'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )

    @property
    def lowercase_ ( self ) -> int:
        '''simple docstring'''
        __lowerCamelCase = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )

    @property
    def lowercase_ ( self ) -> int:
        '''simple docstring'''
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
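The derived quantities in the properties above follow from simple arithmetic; here is a standalone check with the default values (plain Python, independent of the class, whose obfuscated property names correspond to `chunk_length`, `chunk_stride`, `frame_rate` and `num_quantizers` upstream):

import math

import numpy as np

sampling_rate = 24_000
upsampling_ratios = [8, 5, 4, 2]

hop_length = int(np.prod(upsampling_ratios))        # 320 samples per output frame
frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second
target_bandwidth = 24.0                             # kbps, last entry of the default list
num_quantizers = int(1_000 * target_bandwidth // (frame_rate * 10))
print(hop_length, frame_rate, num_quantizers)       # 320 75 32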
353
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu __A = False class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase_ ( self ) -> int: '''simple docstring''' return 12 @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' return 12 @property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return 32 @property def lowercase_ ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(lowerCamelCase__ ) @property def lowercase_ ( self ) -> str: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = 12 __lowerCamelCase = 12 __lowerCamelCase = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } __lowerCamelCase = TransformeraDModel(**lowerCamelCase__ ) return model def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.dummy_vqvae __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_transformer __lowerCamelCase = VQDiffusionScheduler(self.num_embed ) __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase__ ) __lowerCamelCase = VQDiffusionPipeline( vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'teddy bear playing in the pool' __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' ) __lowerCamelCase = output.images __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe( [prompt] , generator=lowerCamelCase__ , 
output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowerCamelCase = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.dummy_vqvae __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_transformer __lowerCamelCase = VQDiffusionScheduler(self.num_embed ) __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings( learnable=lowerCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __lowerCamelCase = VQDiffusionPipeline( vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'teddy bear playing in the pool' __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' ) __lowerCamelCase = output.images __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe( [prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowerCamelCase = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) __lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) __lowerCamelCase = pipeline.to(lowerCamelCase__ ) pipeline.set_progress_bar_config(disable=lowerCamelCase__ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipeline( 'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
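Collapsed to its essentials, the slow test above runs the released checkpoint like this (a sketch; as the test comment notes, the gumbel-softmax sampling step wants a GPU generator in real use):

import torch
from diffusers import VQDiffusionPipeline

pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipeline = pipeline.to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)
output = pipeline(
    "teddy bear playing in the pool",
    num_images_per_prompt=1,
    generator=generator,
    output_type="np",
)
print(output.images[0].shape)  # (256, 256, 3)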
348
0
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Any, Optional, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
    """simple docstring"""

    snake_case_ = 1

    @register_to_config
    def __init__( self , lowerCamelCase__=2_000 , lowerCamelCase__=0.1 , lowerCamelCase__=20 , lowerCamelCase__=1e-3 ) -> Optional[Any]:
        '''simple docstring'''
        __lowerCamelCase = None
        __lowerCamelCase = None
        __lowerCamelCase = None

    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Any:
        '''simple docstring'''
        __lowerCamelCase = torch.linspace(1 , self.config.sampling_eps , lowerCamelCase__ , device=lowerCamelCase__ )

    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> Optional[Any]:
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        __lowerCamelCase = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        __lowerCamelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        __lowerCamelCase = std.flatten()
        while len(std.shape ) < len(score.shape ):
            __lowerCamelCase = std.unsqueeze(-1 )
        __lowerCamelCase = -score / std

        # compute
        __lowerCamelCase = -1.0 / len(self.timesteps )
        __lowerCamelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        __lowerCamelCase = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            __lowerCamelCase = beta_t.unsqueeze(-1 )
        __lowerCamelCase = -0.5 * beta_t * x

        __lowerCamelCase = torch.sqrt(lowerCamelCase__ )
        __lowerCamelCase = drift - diffusion**2 * score
        __lowerCamelCase = x + drift * dt

        # add noise
        __lowerCamelCase = randn_tensor(x.shape , layout=x.layout , generator=lowerCamelCase__ , device=x.device , dtype=x.dtype )
        __lowerCamelCase = x_mean + diffusion * math.sqrt(-dt ) * noise

        return x, x_mean

    def __len__( self ) -> Optional[Any]:
        '''simple docstring'''
        return self.config.num_train_timesteps
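A short driver loop for the scheduler above (a sketch: upstream this class is `ScoreSdeVpScheduler`, its step method is `step_pred`, and the import path below is an assumption; a negative-identity score stands in for a trained model):

import torch
from diffusers.schedulers.scheduling_sde_vp import ScoreSdeVpScheduler  # assumed path

scheduler = ScoreSdeVpScheduler(num_train_timesteps=2_000)
scheduler.set_timesteps(num_inference_steps=10)

x = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    score = -x  # placeholder for model(x, t); a real score network goes here
    x, x_mean = scheduler.step_pred(score, x, t)
print(x_mean.shape)  # denoised mean, same shape as the input sample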
code_codestyle: 354
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = is_training __lowerCamelCase = use_auxiliary_loss __lowerCamelCase = num_queries __lowerCamelCase = num_channels __lowerCamelCase = min_size __lowerCamelCase = max_size __lowerCamelCase = num_labels __lowerCamelCase = hidden_dim __lowerCamelCase = hidden_dim def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCamelCase__ ) __lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ ) __lowerCamelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5 ).float() __lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long() __lowerCamelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerConfig( hidden_size=self.hidden_dim , ) __lowerCamelCase = self.num_queries __lowerCamelCase = self.num_labels __lowerCamelCase = [1, 1, 1, 1] __lowerCamelCase = self.num_channels __lowerCamelCase = 64 __lowerCamelCase = 128 __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim return config def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = output.encoder_hidden_states __lowerCamelCase = output.pixel_decoder_hidden_states __lowerCamelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple: '''simple docstring''' with torch.no_grad(): __lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) 
model.eval() __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() def comm_check_on_output(lowerCamelCase__ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) __lowerCamelCase = model( pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = MaskaFormerModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def lowercase_ ( self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def lowercase_ ( self ) 
-> Tuple: '''simple docstring''' pass @unittest.skip(reason='Mask2Former is not a generative model' ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def lowercase_ ( self ) -> Dict: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' pass def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: __lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = (self.model_tester.min_size,) * 2 __lowerCamelCase = { 'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ), 'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ), 'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(), } __lowerCamelCase = self.model_tester.get_config() __lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' if not self.model_tester.is_training: return __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss loss.backward() def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = 
model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) __lowerCamelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __lowerCamelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCamelCase__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" __lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def lowercase_ ( self ) -> Dict: '''simple docstring''' return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) __lowerCamelCase = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) # masks_queries_logits 
__lowerCamelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) __lowerCamelCase = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] __lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) # class_queries_logits __lowerCamelCase = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) __lowerCamelCase = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) __lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ ) __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']] __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']] with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None )
style_context_codestyle: 348
label: 0
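The scheduler in the preceding code field advances a sample with one Euler-Maruyama step of the reverse-time variance-preserving SDE. A hedged restatement with readable names, assuming a scalar timestep; beta_min and beta_max mirror the defaults visible in its signature (0.1 and 20), while the function name is invented for illustration.

import math
import torch

def reverse_vp_sde_step(x, score, t, dt, beta_min=0.1, beta_max=20.0):
    beta_t = beta_min + t * (beta_max - beta_min)    # linear beta schedule
    drift = -0.5 * beta_t * x                        # forward drift f(x, t)
    diffusion = math.sqrt(beta_t)                    # g(t)
    drift = drift - diffusion**2 * score             # reverse-time drift
    x_mean = x + drift * dt                          # deterministic part of the update
    noise = torch.randn_like(x)
    x = x_mean + diffusion * math.sqrt(-dt) * noise  # dt < 0 when integrating backwards
    return x, x_mean

# One step from pure noise with a zero score; dt = -1 / num_train_timesteps (2000),
# matching the -1.0 / len(self.timesteps) computation in the scheduler above.
x0 = torch.randn(2, 3, 8, 8)
x, x_mean = reverse_vp_sde_step(x0, score=torch.zeros_like(x0), t=1.0, dt=-1.0 / 2000)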
from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json", } class __lowerCamelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''autoformer''' snake_case_ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "student_t" , lowerCamelCase__ = "nll" , lowerCamelCase__ = 1 , lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7] , lowerCamelCase__ = True , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 64 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 32 , lowerCamelCase__ = 32 , lowerCamelCase__ = "gelu" , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = True , lowerCamelCase__=True , lowerCamelCase__ = 10 , lowerCamelCase__ = 25 , lowerCamelCase__ = 3 , **lowerCamelCase__ , ) -> List[Any]: '''simple docstring''' __lowerCamelCase = prediction_length __lowerCamelCase = context_length if context_length is not None else prediction_length __lowerCamelCase = distribution_output __lowerCamelCase = loss __lowerCamelCase = input_size __lowerCamelCase = num_time_features __lowerCamelCase = lags_sequence __lowerCamelCase = scaling __lowerCamelCase = num_dynamic_real_features __lowerCamelCase = num_static_real_features __lowerCamelCase = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(lowerCamelCase__ ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) __lowerCamelCase = cardinality else: __lowerCamelCase = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(lowerCamelCase__ ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) __lowerCamelCase = embedding_dimension else: __lowerCamelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] __lowerCamelCase = num_parallel_samples # Transformer architecture configuration __lowerCamelCase = input_size * len(self.lags_sequence ) + self._number_of_features __lowerCamelCase = d_model __lowerCamelCase = encoder_attention_heads __lowerCamelCase = decoder_attention_heads __lowerCamelCase = encoder_ffn_dim __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = encoder_layers __lowerCamelCase = decoder_layers __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = encoder_layerdrop __lowerCamelCase = decoder_layerdrop __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = use_cache # Autoformer __lowerCamelCase = label_length __lowerCamelCase = moving_average __lowerCamelCase = autocorrelation_factor super().__init__(is_encoder_decoder=lowerCamelCase__ , **lowerCamelCase__ ) @property def lowercase_ ( self ) -> int: '''simple 
docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
code_codestyle: 355
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A = { "facebook/mask2former-swin-small-coco-instance": ( "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } __A = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''mask2former''' snake_case_ = ['''swin'''] snake_case_ = {'''hidden_size''': '''hidden_dim'''} def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1_024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2_048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12_544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple: '''simple docstring''' if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) __lowerCamelCase = CONFIG_MAPPING['swin']( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase = backbone_config.pop('model_type' ) __lowerCamelCase = CONFIG_MAPPING[backbone_model_type] __lowerCamelCase = config_class.from_dict(lowerCamelCase__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {','.join(self.backbones_supported )}""" ) __lowerCamelCase = backbone_config __lowerCamelCase = feature_size __lowerCamelCase = mask_feature_size __lowerCamelCase = hidden_dim __lowerCamelCase = encoder_feedforward_dim __lowerCamelCase = activation_function __lowerCamelCase = encoder_layers __lowerCamelCase = decoder_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = dropout __lowerCamelCase = dim_feedforward __lowerCamelCase = pre_norm __lowerCamelCase = enforce_input_projection __lowerCamelCase = common_stride __lowerCamelCase = ignore_value __lowerCamelCase = num_queries __lowerCamelCase = no_object_weight __lowerCamelCase = class_weight __lowerCamelCase = mask_weight __lowerCamelCase = dice_weight __lowerCamelCase = train_num_points __lowerCamelCase = oversample_ratio __lowerCamelCase = importance_sample_ratio __lowerCamelCase = init_std __lowerCamelCase = init_xavier_std __lowerCamelCase = use_auxiliary_loss __lowerCamelCase = feature_strides __lowerCamelCase = output_auxiliary_logits __lowerCamelCase = decoder_layers super().__init__(**lowerCamelCase__ ) @classmethod def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' return cls( backbone_config=lowerCamelCase__ , **lowerCamelCase__ , ) def lowercase_ ( self ) -> Dict[str, any]: '''simple docstring''' __lowerCamelCase = copy.deepcopy(self.__dict__ ) __lowerCamelCase = self.backbone_config.to_dict() __lowerCamelCase = self.__class__.model_type return output
style_context_codestyle: 348
label: 0
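The time-series config in the row above derives one attribute from all the feature counts: input_size * len(lags_sequence) plus the feature total returned by its final property. A small worked example, assuming the masked names match the conventional ones in the library (feature_size and _number_of_features); the keyword values are made up for illustration.

# feature_size = input_size * len(lags_sequence)
#              + sum(embedding_dimension) + num_dynamic_real_features
#              + num_time_features + num_static_real_features + input_size * 2
from transformers import AutoformerConfig

config = AutoformerConfig(
    prediction_length=24,
    num_time_features=1,
    num_static_categorical_features=1,
    cardinality=[366],        # one entry per static categorical feature, as validated above
    embedding_dimension=[32],
)
# With the defaults lags_sequence=[1..7] and input_size=1 shown in the signature above:
assert config.feature_size == 1 * 7 + (32 + 0 + 1 + 0 + 2)  # == 42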
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 356
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = 42 class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple: '''simple docstring''' super().__init__() __lowerCamelCase = num_attention_heads __lowerCamelCase = attention_head_dim __lowerCamelCase = num_attention_heads * attention_head_dim __lowerCamelCase = additional_embeddings __lowerCamelCase = time_embed_dim or inner_dim __lowerCamelCase = embedding_proj_dim or embedding_dim __lowerCamelCase = clip_embed_dim or embedding_dim __lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 ) __lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) if embedding_proj_norm_type is None: __lowerCamelCase = None elif embedding_proj_norm_type == "layer": __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) else: raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) if encoder_hid_proj_type is None: __lowerCamelCase = None elif encoder_hid_proj_type == "linear": __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) else: raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) ) if added_emb_type == "prd": __lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) ) elif added_emb_type is None: __lowerCamelCase = None else: raise ValueError( f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" ) __lowerCamelCase = nn.ModuleList( [ BasicTransformerBlock( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , ) for d in range(lowerCamelCase__ ) ] ) if norm_in_type == "layer": __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) elif norm_in_type is None: __lowerCamelCase = None else: raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" ) __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 ) causal_attention_mask.triu_(1 ) __lowerCamelCase = causal_attention_mask[None, ...] 
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowercase_ ( self ) -> Dict[str, AttentionProcessor]: '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , 'set_processor' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return processors def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count: raise ValueError( f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the""" f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , 'set_processor' ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): module.set_processor(lowerCamelCase__ ) else: module.set_processor(processor.pop(f"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int: '''simple docstring''' __lowerCamelCase = hidden_states.shape[0] __lowerCamelCase = timestep if not torch.is_tensor(lowerCamelCase__ ): __lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0: __lowerCamelCase = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device ) __lowerCamelCase = self.time_proj(lowerCamelCase__ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype ) __lowerCamelCase = self.time_embedding(lowerCamelCase__ ) if self.embedding_proj_norm is not None: __lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ ) __lowerCamelCase = self.embedding_proj(lowerCamelCase__ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' ) __lowerCamelCase = self.proj_in(lowerCamelCase__ ) __lowerCamelCase = self.positional_embedding.to(hidden_states.dtype ) __lowerCamelCase = [] __lowerCamelCase = 0 if encoder_hidden_states is not None: additional_embeds.append(lowerCamelCase__ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __lowerCamelCase = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __lowerCamelCase = hidden_states[:, None, :] __lowerCamelCase = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 ) additional_embeds.append(lowerCamelCase__ ) __lowerCamelCase = torch.cat( lowerCamelCase__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __lowerCamelCase = F.pad( lowerCamelCase__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __lowerCamelCase = hidden_states + positional_embeddings if attention_mask is not None: __lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0 __lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 ) __lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __lowerCamelCase = self.norm_in(lowerCamelCase__ ) for block in self.transformer_blocks: __lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) __lowerCamelCase = self.norm_out(lowerCamelCase__ ) if self.prd_embedding is not None: __lowerCamelCase = hidden_states[:, -1] else: __lowerCamelCase = hidden_states[:, additional_embeddings_len:] __lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]: '''simple docstring''' __lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
style_context_codestyle: 348
label: 0
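The prior-transformer code in the row above builds its causal attention mask by filling a square matrix with -10000.0 and keeping only the entries strictly above the diagonal, so each position can attend to itself and earlier positions only. A standalone sketch of that construction; the function name is invented.

import torch

def build_causal_mask(seq_len: int) -> torch.Tensor:
    mask = torch.full((seq_len, seq_len), -10000.0)
    mask.triu_(1)           # zero on/below the diagonal, -10000.0 strictly above it
    return mask[None, ...]  # broadcastable batch dimension, as registered in the buffer above

print(build_causal_mask(4)[0])
# tensor([[     0., -10000., -10000., -10000.],
#         [     0.,      0., -10000., -10000.],
#         [     0.,      0.,      0., -10000.],
#         [     0.,      0.,      0.,      0.]])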
import numpy as np from PIL import Image def lowerCamelCase_ ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> np.ndarray: """simple docstring""" __lowerCamelCase = np.array(UpperCamelCase__ ) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix' ) __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = 0 # compute the shape of the output matrix __lowerCamelCase = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __lowerCamelCase = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __lowerCamelCase = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __lowerCamelCase = 0 __lowerCamelCase = 0 return updated_arr def lowerCamelCase_ ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> np.ndarray: """simple docstring""" __lowerCamelCase = np.array(UpperCamelCase__ ) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix' ) __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = 0 # compute the shape of the output matrix __lowerCamelCase = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __lowerCamelCase = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __lowerCamelCase = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __lowerCamelCase = 0 __lowerCamelCase = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="avgpooling", verbose=True) # Loading the image __A = Image.open("path_to_image") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
code_codestyle: 357
import sys from collections import defaultdict class __lowerCAmelCase : """simple docstring""" def __init__( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = [] def lowercase_ ( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' return self.node_position[vertex] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = pos def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCamelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCamelCase = 2 * start + 1 else: __lowerCamelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child] __lowerCamelCase , __lowerCamelCase = ( heap[start], positions[start], ) __lowerCamelCase , __lowerCamelCase = temp, tempa __lowerCamelCase = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , lowerCamelCase__ ) self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = position[index] while index != 0: __lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCamelCase = heap[parent] __lowerCamelCase = position[parent] self.set_position(position[parent] , lowerCamelCase__ ) else: __lowerCamelCase = val __lowerCamelCase = temp self.set_position(lowerCamelCase__ , lowerCamelCase__ ) break __lowerCamelCase = parent else: __lowerCamelCase = val __lowerCamelCase = temp self.set_position(lowerCamelCase__ , 0 ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str: '''simple docstring''' __lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1 for i in range(lowerCamelCase__ , -1 , -1 ): self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = positions[0] __lowerCamelCase = sys.maxsize self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ ) return temp def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowerCamelCase = Heap() __lowerCamelCase = [0] * len(UpperCamelCase__ ) __lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex __lowerCamelCase = [] for vertex in range(len(UpperCamelCase__ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCamelCase__ ) heap.node_position.append(UpperCamelCase__ ) __lowerCamelCase = [] __lowerCamelCase = 1 __lowerCamelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCamelCase = 0 __lowerCamelCase = distance heap.heapify(UpperCamelCase__ , UpperCamelCase__ ) for _ in range(1 , len(UpperCamelCase__ ) ): __lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ ) if visited[vertex] == 0: 
tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCamelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCamelCase__ )] ): __lowerCamelCase = distance heap.bottom_to_top( UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > __A = int(input("Enter number of edges: ").strip()) __A = defaultdict(list) for _ in range(edges_number): __A = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
style_context_codestyle: 348
label: 0
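The pooling routines in the row above walk the input with explicit while-loops over window positions. The same max-pooling result can be obtained in a few vectorized lines; a sketch assuming a square input and numpy >= 1.20 (for sliding_window_view).

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def maxpool2d(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    # Take all size-x-size windows, keep every stride-th one along both axes,
    # and reduce each window to its maximum.
    windows = sliding_window_view(arr, (size, size))[::stride, ::stride]
    return windows.max(axis=(-2, -1))

arr = np.arange(16, dtype=float).reshape(4, 4)
print(maxpool2d(arr, size=2, stride=2))  # [[ 5.  7.] [13. 15.]]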
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json" ), }, } __A = { "facebook/nllb-large-en-ro": 10_24, "facebook/nllb-200-distilled-600M": 10_24, } # fmt: off __A = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = 
PRETRAINED_VOCAB_FILES_MAP snake_case_ = ['''input_ids''', '''attention_mask'''] snake_case_ = NllbTokenizer snake_case_ = [] snake_case_ = [] def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=False , **lowerCamelCase__ , ) -> List[Any]: '''simple docstring''' __lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token __lowerCamelCase = legacy_behaviour super().__init__( vocab_file=lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , legacy_behaviour=lowerCamelCase__ , **lowerCamelCase__ , ) __lowerCamelCase = vocab_file __lowerCamelCase = False if not self.vocab_file else True __lowerCamelCase = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) __lowerCamelCase = { lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __lowerCamelCase = src_lang if src_lang is not None else 'eng_Latn' __lowerCamelCase = self.convert_tokens_to_ids(self._src_lang ) __lowerCamelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def lowercase_ ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def lowercase_ ( self , lowerCamelCase__ ) -> None: '''simple docstring''' __lowerCamelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]: '''simple docstring''' __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) __lowerCamelCase = src_lang __lowerCamelCase = self(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) __lowerCamelCase = self.convert_tokens_to_ids(lowerCamelCase__ ) __lowerCamelCase = tgt_lang_id return inputs def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = "eng_Latn" , lowerCamelCase__ 
= None , lowerCamelCase__ = "fra_Latn" , **lowerCamelCase__ , ) -> BatchEncoding: '''simple docstring''' __lowerCamelCase = src_lang __lowerCamelCase = tgt_lang return super().prepare_seqaseq_batch(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self ) -> int: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def lowercase_ ( self ) -> Dict: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase_ ( self , lowerCamelCase__ ) -> None: '''simple docstring''' __lowerCamelCase = self.convert_tokens_to_ids(lowerCamelCase__ ) if self.legacy_behaviour: __lowerCamelCase = [] __lowerCamelCase = [self.eos_token_id, self.cur_lang_code] else: __lowerCamelCase = [self.cur_lang_code] __lowerCamelCase = [self.eos_token_id] __lowerCamelCase = self.convert_ids_to_tokens(self.prefix_tokens ) __lowerCamelCase = self.convert_ids_to_tokens(self.suffix_tokens ) __lowerCamelCase = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowercase_ ( self , lowerCamelCase__ ) -> None: '''simple docstring''' __lowerCamelCase = self.convert_tokens_to_ids(lowerCamelCase__ ) if self.legacy_behaviour: __lowerCamelCase = [] __lowerCamelCase = [self.eos_token_id, self.cur_lang_code] else: __lowerCamelCase = [self.cur_lang_code] __lowerCamelCase = [self.eos_token_id] __lowerCamelCase = self.convert_ids_to_tokens(self.prefix_tokens ) __lowerCamelCase = self.convert_ids_to_tokens(self.suffix_tokens ) __lowerCamelCase = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCamelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __lowerCamelCase = os.path.join( lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ): copyfile(self.vocab_file , lowerCamelCase__ ) return (out_vocab_file,)
code_codestyle: 358
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=__magic_name__ ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) snake_case_ = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} ) snake_case_ = Features( { '''answers''': Sequence( { '''text''': Value('''string''' ), '''answer_start''': Value('''int32''' ), } ) } ) snake_case_ = "question" snake_case_ = "context" snake_case_ = "answers" @property def lowercase_ ( self ) -> Dict[str, str]: '''simple docstring''' return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
style_context_codestyle: 348
label: 0
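A usage sketch for the NLLB fast tokenizer in the row above, with the public checkpoint named in its pretrained maps. This requires network access to download the tokenizer; the exact special tokens around the sequence depend on legacy_behaviour, as set_src_lang_special_tokens above implements.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
batch = tokenizer("Hello world", text_target="Bonjour le monde", return_tensors="pt")
# The source language code ("eng_Latn") and </s> bracket the input ids.
print(tokenizer.convert_ids_to_tokens(batch["input_ids"][0]))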
__A = { "joule": 1.0, "kilojoule": 10_00, "megajoule": 1_00_00_00, "gigajoule": 10_00_00_00_00, "wattsecond": 1.0, "watthour": 36_00, "kilowatthour": 3_60_00_00, "newtonmeter": 1.0, "calorie_nutr": 41_86.8, "kilocalorie_nutr": 4_18_68_00.00, "electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9, "britishthermalunit_it": 10_55.0_55_85, "footpound": 1.3_5_5_8_1_8, } def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : float ): """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: __lowerCamelCase = ( F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" F"""Valid values are: {', '.join(UpperCamelCase__ )}""" ) raise ValueError(UpperCamelCase__ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 359
import requests __A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=" def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None: """simple docstring""" __lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page['articles'] , 1 ): print(F"""{i}.) {article['title']}""" ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
style_context_codestyle: 348
label: 0
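The converter in the row above is a single table lookup and ratio: convert to joules, then from joules to the target unit. A trimmed, self-contained restatement with a worked check; the function's masked name is assumed to be energy_conversion here, and only three table entries are kept for brevity.

from math import isclose

ENERGY_CONVERSION = {"joule": 1.0, "kilojoule": 1000.0, "calorie_nutr": 4186.8}

def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        raise ValueError(f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}")
    # value in joules = value * factor[from]; divide by factor[to] for the target unit
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]

assert isclose(energy_conversion("kilojoule", "joule", 1.0), 1000.0)
assert isclose(energy_conversion("joule", "calorie_nutr", 4186.8), 1.0)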
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __A = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = ['''pixel_values'''] def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 255 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , **lowerCamelCase__ , ) -> None: '''simple docstring''' super().__init__(**lowerCamelCase__ ) __lowerCamelCase = size if size is not None else {'shortest_edge': 224} __lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) __lowerCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224} __lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ , param_name='crop_size' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray: '''simple docstring''' __lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(lowerCamelCase__ , size=size['shortest_edge'] , default_to_square=lowerCamelCase__ ) return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray: '''simple docstring''' __lowerCamelCase = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(lowerCamelCase__ , size=(size['height'], size['width']) , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Optional[Any]: '''simple docstring''' return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray: '''simple docstring''' return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ) -> PIL.Image.Image: '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(lowerCamelCase__ , param_name='size' , default_to_square=lowerCamelCase__ ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(lowerCamelCase__ , param_name='crop_size' , default_to_square=lowerCamelCase__ ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(lowerCamelCase__ ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images] __lowerCamelCase = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images] __lowerCamelCase = {'pixel_values': images} return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
360
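# The image processor above chains resize (shortest edge) -> center crop -> rescale ->
# normalize. Below is a minimal numpy/PIL sketch of that pipeline, not the class's own
# method: the mean/std constants are the OpenAI CLIP defaults, and `preprocess` is a
# made-up helper name.
import numpy as np
from PIL import Image

CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)


def preprocess(image: Image.Image, shortest_edge: int = 224, crop: int = 224) -> np.ndarray:
    # resize so the shortest edge equals `shortest_edge`, preserving aspect ratio
    w, h = image.size
    scale = shortest_edge / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)), Image.BICUBIC)
    # center crop to (crop, crop)
    w, h = image.size
    left, top = (w - crop) // 2, (h - crop) // 2
    image = image.crop((left, top, left + crop, top + crop))
    # rescale to [0, 1], normalize per channel, return channels-first
    arr = np.asarray(image.convert("RGB"), dtype=np.float32) / 255.0
    arr = (arr - CLIP_MEAN) / CLIP_STD
    return arr.transpose(2, 0, 1)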
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __A = logging.get_logger(__name__) __A = TypeVar("DatasetType", Dataset, IterableDataset) def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[List[float]] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType: """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase__ ): if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" ) if i == 0: __lowerCamelCase , __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ ) else: return _interleave_iterable_datasets( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ ) def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : int = 0 , ) -> DatasetType: """simple docstring""" if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase__ ): if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' 
) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" ) if i == 0: __lowerCamelCase , __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ ) else: return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
348
0
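# A short usage sketch for the two entry points above, assuming the `datasets`
# library is installed; the toy datasets are made up for illustration.
from datasets import Dataset, concatenate_datasets, interleave_datasets

ds_a = Dataset.from_dict({"text": ["a", "b", "c"]})
ds_b = Dataset.from_dict({"text": ["x", "y"]})

# sample rows from both sources; "first_exhausted" stops once either runs out
mixed = interleave_datasets(
    [ds_a, ds_b], probabilities=[0.5, 0.5], seed=42, stopping_strategy="first_exhausted"
)

# plain row-wise concatenation (axis=0 is the default)
combined = concatenate_datasets([ds_a, ds_b])
print(len(combined))  # 5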
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), }, "tokenizer_file": { "google/bigbird-roberta-base": ( "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json" ), "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json" ), }, } __A = { "google/bigbird-roberta-base": 40_96, "google/bigbird-roberta-large": 40_96, "google/bigbird-base-trivia-itc": 40_96, } __A = "▁" class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = BigBirdTokenizer snake_case_ = ['''input_ids''', '''attention_mask'''] snake_case_ = [] def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="<unk>" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<pad>" , lowerCamelCase__="[SEP]" , lowerCamelCase__="[MASK]" , lowerCamelCase__="[CLS]" , **lowerCamelCase__ , ) -> Any: '''simple docstring''' __lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token __lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token __lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token __lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token __lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token __lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it __lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token super().__init__( lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , ) __lowerCamelCase = vocab_file __lowerCamelCase = False if not self.vocab_file else True def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]: '''simple docstring''' __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase__ )) + [1] return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]: '''simple docstring''' __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCamelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCamelCase = os.path.join( lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ): copyfile(self.vocab_file , lowerCamelCase__ ) return (out_vocab_file,)
361
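# A toy restatement of the special-token layout the tokenizer above builds:
# [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair, with token
# type ids 0 for the first segment and 1 for the second. The ids below are made up.
CLS, SEP = 65, 66  # hypothetical ids


def build_inputs(ids_a: list[int], ids_b: list[int] | None = None) -> list[int]:
    out = [CLS] + ids_a + [SEP]
    if ids_b is not None:
        out += ids_b + [SEP]
    return out


def token_type_ids(ids_a: list[int], ids_b: list[int] | None = None) -> list[int]:
    first = len(ids_a) + 2  # CLS + tokens + SEP
    if ids_b is None:
        return [0] * first
    return [0] * first + [1] * (len(ids_b) + 1)  # tokens + trailing SEP


assert build_inputs([1, 2]) == [CLS, 1, 2, SEP]
assert token_type_ids([1, 2], [3]) == [0, 0, 0, 0, 1, 1]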
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = ["model.decoder.embed_positions.weights"] def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]: """simple docstring""" if "emb" in name: __lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' ) if "transformer" in name: __lowerCamelCase = name.replace('transformer' , 'model.decoder' ) if "cross_attention" in name: __lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' ) if "linear1" in name: __lowerCamelCase = name.replace('linear1' , 'fc1' ) if "linear2" in name: __lowerCamelCase = name.replace('linear2' , 'fc2' ) if "norm1" in name: __lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' ) if "norm_cross" in name: __lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' ) if "norm2" in name: __lowerCamelCase = name.replace('norm2' , 'final_layer_norm' ) if "out_norm" in name: __lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' ) if "linears" in name: __lowerCamelCase = name.replace('linears' , 'lm_heads' ) if "condition_provider.conditioners.description.output_proj" in name: __lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' ) return name def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]: """simple docstring""" __lowerCamelCase = list(state_dict.keys() ) __lowerCamelCase = {} for key in keys: __lowerCamelCase = state_dict.pop(UpperCamelCase__ ) __lowerCamelCase = rename_keys(UpperCamelCase__ ) if "in_proj_weight" in key: # split fused qkv proj __lowerCamelCase = val[:hidden_size, :] __lowerCamelCase = val[hidden_size : 2 * hidden_size, :] __lowerCamelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __lowerCamelCase = val else: __lowerCamelCase = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig: """simple docstring""" if checkpoint == "small": # default config values __lowerCamelCase = 1024 __lowerCamelCase = 24 __lowerCamelCase = 16 elif checkpoint == "medium": __lowerCamelCase = 1536 __lowerCamelCase = 48 __lowerCamelCase = 24 elif checkpoint == "large": __lowerCamelCase = 2048 __lowerCamelCase = 48 __lowerCamelCase = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) __lowerCamelCase = MusicgenDecoderConfig( hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , ) return config @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]: """simple docstring""" __lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ ) __lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ ) __lowerCamelCase = fairseq_model.lm.state_dict() __lowerCamelCase , __lowerCamelCase = rename_state_dict( UpperCamelCase__ , 
hidden_size=decoder_config.hidden_size ) __lowerCamelCase = TaEncoderModel.from_pretrained('t5-base' ) __lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' ) __lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) for key in missing_keys.copy(): if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(UpperCamelCase__ ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model __lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ ) # check we can do a forward pass __lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __lowerCamelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits if logits.shape != (8, 1, 2048): raise ValueError('Incorrect shape for logits' ) # now construct the processor __lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' ) __lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) # set the appropriate bos/pad token ids __lowerCamelCase = 2048 __lowerCamelCase = 2048 # set other default generation config params __lowerCamelCase = int(30 * audio_encoder.config.frame_rate ) __lowerCamelCase = True __lowerCamelCase = 3.0 if pytorch_dump_folder is not None: Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(UpperCamelCase__ ) processor.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) __A = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
348
0
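# The fused-projection split performed in the state-dict renaming above, shown on a
# dummy tensor: fairseq stores q/k/v stacked along dim 0, so a (3*H, H) matrix is cut
# into three (H, H) matrices.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : 2 * hidden_size, :]
v = in_proj_weight[-hidden_size:, :]

# stacking the three pieces back recovers the fused matrix
assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)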
class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = name __lowerCamelCase = val def __str__( self ) -> Any: '''simple docstring''' return f"""{self.__class__.__name__}({self.name}, {self.val})""" def __lt__( self , lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' return self.val < other.val class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = self.build_heap(lowerCamelCase__ ) def __getitem__( self , lowerCamelCase__ ) -> List[Any]: '''simple docstring''' return self.get_value(lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' return (idx - 1) // 2 def lowercase_ ( self , lowerCamelCase__ ) -> Dict: '''simple docstring''' return idx * 2 + 1 def lowercase_ ( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' return idx * 2 + 2 def lowercase_ ( self , lowerCamelCase__ ) -> str: '''simple docstring''' return self.heap_dict[key] def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = len(lowerCamelCase__ ) - 1 __lowerCamelCase = self.get_parent_idx(lowerCamelCase__ ) for idx, i in enumerate(lowerCamelCase__ ): __lowerCamelCase = idx __lowerCamelCase = i.val for i in range(lowerCamelCase__ , -1 , -1 ): self.sift_down(lowerCamelCase__ , lowerCamelCase__ ) return array def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' while True: __lowerCamelCase = self.get_left_child_idx(lowerCamelCase__ ) # noqa: E741 __lowerCamelCase = self.get_right_child_idx(lowerCamelCase__ ) __lowerCamelCase = idx if l < len(lowerCamelCase__ ) and array[l] < array[idx]: __lowerCamelCase = l if r < len(lowerCamelCase__ ) and array[r] < array[smallest]: __lowerCamelCase = r if smallest != idx: __lowerCamelCase , __lowerCamelCase = array[smallest], array[idx] ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) __lowerCamelCase = smallest else: break def lowercase_ ( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' __lowerCamelCase = self.get_parent_idx(lowerCamelCase__ ) while p >= 0 and self.heap[p] > self.heap[idx]: __lowerCamelCase , __lowerCamelCase = self.heap[idx], self.heap[p] __lowerCamelCase , __lowerCamelCase = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) __lowerCamelCase = p __lowerCamelCase = self.get_parent_idx(lowerCamelCase__ ) def lowercase_ ( self ) -> List[Any]: '''simple docstring''' return self.heap[0] def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.heap[-1], self.heap[0] __lowerCamelCase , __lowerCamelCase = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) __lowerCamelCase = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowercase_ ( self , lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' self.heap.append(lowerCamelCase__ ) __lowerCamelCase = len(self.heap ) - 1 __lowerCamelCase = node.val self.sift_up(len(self.heap ) - 1 ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' return len(self.heap ) == 0 def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: '''simple docstring''' assert ( 
self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less than current value" node.val = new_value self.heap_dict[node.name] = new_value self.sift_up(self.idx_of_element[node] ) r = Node("R", -1) b = Node("B", 6) a = Node("A", 3) x = Node("X", 1) e = Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array my_min_heap = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # my_min_heap.insert(a) # my_min_heap.insert(b) # my_min_heap.insert(x) # my_min_heap.insert(r) # my_min_heap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
362
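# The array-backed indexing the heap above relies on, stated in isolation: a node at
# index i has its parent at (i - 1) // 2 and its children at 2*i + 1 and 2*i + 2.
def parent(i: int) -> int:
    return (i - 1) // 2


def children(i: int) -> tuple[int, int]:
    return 2 * i + 1, 2 * i + 2


# every non-root index should appear among its parent's children
for i in range(1, 7):
    assert i in children(parent(i))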
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''sew-d''' def __init__( self , lowerCamelCase__=32 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__=2 , lowerCamelCase__=512 , lowerCamelCase__=256 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=128 , lowerCamelCase__=16 , lowerCamelCase__=True , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ) -> Any: '''simple docstring''' super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ ) __lowerCamelCase = hidden_size __lowerCamelCase = feat_extract_norm __lowerCamelCase = feat_extract_activation __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = conv_bias __lowerCamelCase = num_conv_pos_embeddings __lowerCamelCase = num_conv_pos_embedding_groups __lowerCamelCase = len(self.conv_dim ) __lowerCamelCase = num_hidden_layers __lowerCamelCase = intermediate_size __lowerCamelCase = squeeze_factor __lowerCamelCase = max_position_embeddings __lowerCamelCase = position_buckets __lowerCamelCase = share_att_key __lowerCamelCase = relative_attention __lowerCamelCase = norm_rel_ebd __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = hidden_act __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = feat_proj_dropout __lowerCamelCase = final_dropout __lowerCamelCase = layer_norm_eps __lowerCamelCase = feature_layer_norm_eps __lowerCamelCase = initializer_range __lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks # ctc loss __lowerCamelCase = ctc_loss_reduction __lowerCamelCase = ctc_zero_infinity # sequence classification __lowerCamelCase = use_weighted_layer_sum __lowerCamelCase = classifier_proj_size @property def lowercase_ ( self ) -> Any: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
348
0
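# The config above keeps a per-layer stride tuple for the feature encoder; its final
# `functools.reduce(operator.mul, ...)` property multiplies them into the overall
# waveform-to-frame downsampling factor. Shown here on the default strides.
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 -> one output frame per 320 input samples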
def join(separator: str, separated: list[str]) -> str:
    """Join a list of strings with the given separator."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    # the loop leaves a trailing separator; strip() removes it (and any leading one)
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
363
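# Quick usage of join() as defined above. Note that strip(separator) removes
# separator characters from both ends, so a leading separator in the first item
# would be lost as well.
print(join(" ", ["Hello", "world"]))  # Hello world
print(join(",", ["1", "2", "3"]))     # 1,2,3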
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __A = logging.get_logger("transformers.models.speecht5") __A = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __A = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __A = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __A = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __A = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __A = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __A = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __A = { 
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __A = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = [] __A = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" for attribute in key.split('.' ): __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ) if weight_type is not None: __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value elif weight_type == "running_mean": __lowerCamelCase = value elif weight_type == "running_var": __lowerCamelCase = value elif weight_type == "num_batches_tracked": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + ('.' 
+ weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any: """simple docstring""" for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = [] if task == "s2t": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2T __lowerCamelCase = IGNORE_KEYS_S2T elif task == "t2s": __lowerCamelCase = None __lowerCamelCase = MAPPING_T2S __lowerCamelCase = IGNORE_KEYS_T2S elif task == "s2s": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2S __lowerCamelCase = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase__ , UpperCamelCase__ ): logger.info(F"""{name} was ignored""" ) continue __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , ) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: __lowerCamelCase = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2] __lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ ) if "weight_g" in name: __lowerCamelCase = 'weight_g' elif "weight_v" in name: __lowerCamelCase = 'weight_v' elif "bias" in name: __lowerCamelCase = 'bias' elif "weight" in name: __lowerCamelCase = 'weight' elif "running_mean" in name: __lowerCamelCase = 'running_mean' elif "running_var" in name: __lowerCamelCase = 'running_var' elif "num_batches_tracked" in name: __lowerCamelCase = 'num_batches_tracked' else: __lowerCamelCase = None set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) continue if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple: """simple docstring""" __lowerCamelCase = full_name.split('conv_layers.' )[-1] __lowerCamelCase = name.split('.' 
) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCamelCase__ ) @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple: """simple docstring""" if config_path is not None: __lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ ) else: __lowerCamelCase = SpeechTaConfig() if task == "s2t": __lowerCamelCase = config.max_text_positions __lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ ) elif task == "t2s": __lowerCamelCase = 1876 __lowerCamelCase = 600 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ ) elif task == "s2s": __lowerCamelCase = 1876 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: __lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it __lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) __lowerCamelCase = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) __lowerCamelCase = SpeechTaFeatureExtractor() __lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) __lowerCamelCase = torch.load(UpperCamelCase__ ) recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if repo_id: print('Pushing to the hub...' 
) processor.push_to_hub(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __A = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
348
0
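# The wildcard matching used by should_ignore() above, restated on its own: keys
# ending in ".*" are prefix matches, keys containing ".*." require both a prefix and
# a suffix, and anything else is a plain substring match. `matches` is a made-up
# helper name.
def matches(name: str, key: str) -> bool:
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name


assert matches("text_encoder_prenet.embed.weight", "text_encoder_prenet.*")
assert matches("encoder.layers.3.fc1", "encoder.layers.*.fc1")
assert not matches("decoder.layers.0.fc1", "encoder.layers.*.fc1")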
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( 'compression_format, is_archive' , [ ('7z', True), ('bz2', False), ('gzip', False), ('lz4', False), ('tar', True), ('xz', False), ('zip', True), ('zstd', False), ] , ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , ) -> Any: """simple docstring""" __lowerCamelCase = { '7z': (seven_zip_file, SevenZipExtractor), 'bz2': (bza_file, BzipaExtractor), 'gzip': (gz_file, GzipExtractor), 'lz4': (lza_file, LzaExtractor), 'tar': (tar_file, TarExtractor), 'xz': (xz_file, XzExtractor), 'zip': (zip_file, ZipExtractor), 'zstd': (zstd_file, ZstdExtractor), } __lowerCamelCase , __lowerCamelCase = input_paths_and_base_extractors[compression_format] if input_path is None: __lowerCamelCase = F"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(UpperCamelCase__ ) assert base_extractor.is_extractable(UpperCamelCase__ ) __lowerCamelCase = tmp_path / ('extracted' if is_archive else 'extracted.txt') base_extractor.extract(UpperCamelCase__ , UpperCamelCase__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name __lowerCamelCase = file_path.read_text(encoding='utf-8' ) else: __lowerCamelCase = output_path.read_text(encoding='utf-8' ) __lowerCamelCase = text_file.read_text(encoding='utf-8' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( 'compression_format, is_archive' , [ ('7z', True), ('bz2', False), ('gzip', False), ('lz4', False), ('tar', True), ('xz', False), ('zip', True), ('zstd', False), ] , ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , ) -> Union[str, Any]: """simple docstring""" __lowerCamelCase = { '7z': seven_zip_file, 'bz2': bza_file, 'gzip': gz_file, 'lz4': lza_file, 'tar': tar_file, 'xz': xz_file, 'zip': zip_file, 'zstd': zstd_file, } __lowerCamelCase = input_paths[compression_format] if input_path is None: __lowerCamelCase = F"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(UpperCamelCase__ ) __lowerCamelCase = Extractor.infer_extractor_format(UpperCamelCase__ ) assert extractor_format is not None __lowerCamelCase = tmp_path / ('extracted' if is_archive else 'extracted.txt') 
Extractor.extract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name __lowerCamelCase = file_path.read_text(encoding='utf-8' ) else: __lowerCamelCase = output_path.read_text(encoding='utf-8' ) __lowerCamelCase = text_file.read_text(encoding='utf-8' ) assert extracted_file_content == expected_file_content @pytest.fixture def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> Optional[int]: """simple docstring""" import tarfile __lowerCamelCase = tmp_path / 'data_dot_dot' directory.mkdir() __lowerCamelCase = directory / 'tar_file_with_dot_dot.tar' with tarfile.TarFile(UpperCamelCase__ , 'w' ) as f: f.add(UpperCamelCase__ , arcname=os.path.join('..' , text_file.name ) ) return path @pytest.fixture def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> Optional[Any]: """simple docstring""" import tarfile __lowerCamelCase = tmp_path / 'data_sym_link' directory.mkdir() __lowerCamelCase = directory / 'tar_file_with_sym_link.tar' os.symlink('..' , directory / 'subdir' , target_is_directory=UpperCamelCase__ ) with tarfile.TarFile(UpperCamelCase__ , 'w' ) as f: f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( 'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]: """simple docstring""" __lowerCamelCase = { 'tar_file_with_dot_dot': tar_file_with_dot_dot, 'tar_file_with_sym_link': tar_file_with_sym_link, } __lowerCamelCase = insecure_tar_files[insecure_tar_file] __lowerCamelCase = tmp_path / 'extracted' TarExtractor.extract(UpperCamelCase__ , UpperCamelCase__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ) -> Any: """simple docstring""" __lowerCamelCase = tmpdir / 'not_a_zip_file' # From: https://github.com/python/cpython/pull/5053 __lowerCamelCase = ( b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00' b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I' b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07' b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82' ) with not_a_zip_file.open('wb' ) as f: f.write(UpperCamelCase__ ) assert zipfile.is_zipfile(str(UpperCamelCase__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(UpperCamelCase__ ) # but we're right
364
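# A minimal magic-number sniffer in the spirit of the format inference the tests
# above exercise; the signature table is abbreviated and `sniff_format` is a made-up
# helper, not the library's API.
MAGIC = {
    b"\x1f\x8b": "gzip",
    b"BZh": "bz2",
    b"PK\x03\x04": "zip",
    b"\xfd7zXZ\x00": "xz",
    b"(\xb5/\xfd": "zstd",
}


def sniff_format(path: str) -> str | None:
    with open(path, "rb") as f:
        head = f.read(8)
    for magic, fmt in MAGIC.items():
        if head.startswith(magic):
            return fmt
    return None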
# A bipartite graph is a graph whose vertices can be divided into two independent
# sets, U and V, such that every edge (u, v) connects a vertex from U to one from V.
# Equivalently, no edge joins two vertices of the same set, which is what the
# two-coloring below checks.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # color every connected component, starting each one with color 0
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # the graph is bipartite iff no edge joins two same-colored vertices
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
348
0
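# The same two-coloring check as above, but iterative (BFS) to avoid recursion depth
# limits on long paths; it returns False exactly when the graph contains an odd cycle.
from collections import deque


def check_bipartite_bfs(graph: dict[int, list[int]]) -> bool:
    color: dict[int, int] = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if u not in color:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True


print(check_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))  # True
print(check_bipartite_bfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False (odd cycle)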
import os import jsonlines import numpy as np from tqdm import tqdm __A = 20_48 __A = 40_96 __A = 42 __A = os.environ.pop("PROCESS_TRAIN", "false") __A = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4} def lowerCamelCase_ ( UpperCamelCase__ : str ) -> str: """simple docstring""" def choose_first(UpperCamelCase__ : int , UpperCamelCase__ : List[Any]=False ): assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) if len(UpperCamelCase__ ) == 1: __lowerCamelCase = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: __lowerCamelCase = {k: [a[k]] for k in a} if len(a['start_token'] ) > 0: break return a __lowerCamelCase = {'id': example['id']} __lowerCamelCase = example['annotations'] __lowerCamelCase = annotation['yes_no_answer'] if 0 in yes_no_answer or 1 in yes_no_answer: __lowerCamelCase = ['yes'] if 1 in yes_no_answer else ['no'] __lowerCamelCase = __lowerCamelCase = [] __lowerCamelCase = __lowerCamelCase = [] __lowerCamelCase = ['<cls>'] else: __lowerCamelCase = ['short'] __lowerCamelCase = choose_first(annotation['short_answers'] ) if len(out['start_token'] ) == 0: # answer will be long if short is not available __lowerCamelCase = ['long'] __lowerCamelCase = choose_first(annotation['long_answer'] , is_long_answer=UpperCamelCase__ ) __lowerCamelCase = [] answer.update(UpperCamelCase__ ) # disregard some samples if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]: __lowerCamelCase = True else: __lowerCamelCase = False __lowerCamelCase = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text'] if not all(isinstance(answer[k] , UpperCamelCase__ ) for k in cols ): raise ValueError('Issue in ID' , example['id'] ) return answer def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict=False ) -> int: """simple docstring""" __lowerCamelCase = _get_single_answer(UpperCamelCase__ ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element __lowerCamelCase = example['document']['tokens'] __lowerCamelCase = [] for i in range(len(doc['token'] ) ): if not doc["is_html"][i]: context.append(doc['token'][i] ) return { "context": " ".join(UpperCamelCase__ ), "answer": { "start_token": -100, # ignore index in cross-entropy "end_token": -100, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples __lowerCamelCase = ['start_token', 'end_token'] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 __lowerCamelCase = example['document']['tokens'] __lowerCamelCase = answer['start_token'] __lowerCamelCase = answer['end_token'] __lowerCamelCase = [] for i in range(len(doc['token'] ) ): if not doc["is_html"][i]: context.append(doc['token'][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 __lowerCamelCase = ' '.join(context[start_token:end_token] ) # checking above code if assertion: __lowerCamelCase = doc['is_html'][answer['start_token'] : answer['end_token']] __lowerCamelCase = doc['token'][answer['start_token'] : answer['end_token']] __lowerCamelCase = ' '.join([old[i] for i in range(len(UpperCamelCase__ ) ) if not is_html[i]] ) if new != old: print('ID:' , example['id'] ) print('New:' , UpperCamelCase__ , end='\n' ) print('Old:' , UpperCamelCase__ , end='\n\n' ) return { "context": " ".join(UpperCamelCase__ ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=2048 , UpperCamelCase__ : List[Any]=4096 , UpperCamelCase__ : List[str]=True ) -> Dict: """simple docstring""" __lowerCamelCase = get_context_and_ans(UpperCamelCase__ , assertion=UpperCamelCase__ ) __lowerCamelCase = out['answer'] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } __lowerCamelCase = tokenizer(example['question']['text'] , out['context'] ).input_ids __lowerCamelCase = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = input_ids[:q_len] __lowerCamelCase = range(UpperCamelCase__ , len(UpperCamelCase__ ) , max_length - doc_stride ) for i in doc_start_indices: __lowerCamelCase = i + max_length - q_len __lowerCamelCase = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer['category'][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-100] * len(UpperCamelCase__ ), "end_token": [-100] * len(UpperCamelCase__ ), "category": category, }, } __lowerCamelCase = out['context'].split() __lowerCamelCase = splitted_context[answer['end_token']] __lowerCamelCase = len( tokenizer( ' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=UpperCamelCase__ , ).input_ids ) __lowerCamelCase = len( tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=UpperCamelCase__ ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token __lowerCamelCase = len(tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 __lowerCamelCase = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive __lowerCamelCase = answer['start_token'] __lowerCamelCase = answer['end_token'] if assertion: __lowerCamelCase = tokenizer.decode(UpperCamelCase__ ) if answer["span"] != new: print('ISSUE IN TOKENIZATION' ) print('OLD:' , answer['span'] ) print('NEW:' , UpperCamelCase__ , end='\n\n' ) if len(UpperCamelCase__ ) <= max_length: return { "example_id": example["id"], "input_ids": 
[input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } __lowerCamelCase = input_ids[:q_len] __lowerCamelCase = range(UpperCamelCase__ , len(UpperCamelCase__ ) , max_length - doc_stride ) __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] # null, yes, no, long, short for i in doc_start_indices: __lowerCamelCase = i + max_length - q_len __lowerCamelCase = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: __lowerCamelCase = start_token - i + q_len __lowerCamelCase = end_token - i + q_len answers_category.append(answer['category'][0] ) # ["short"] -> "short" else: __lowerCamelCase = -100 __lowerCamelCase = -100 answers_category.append('null' ) __lowerCamelCase = inputs[-1][start_token : end_token + 1] answers_start_token.append(UpperCamelCase__ ) answers_end_token.append(UpperCamelCase__ ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print('ISSUE in strided for ID:' , example['id'] ) print('New:' , tokenizer.decode(UpperCamelCase__ ) ) print('Old:' , tokenizer.decode(UpperCamelCase__ ) , end='\n\n' ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=2048 , UpperCamelCase__ : int=4096 , UpperCamelCase__ : str=False ) -> Union[str, Any]: """simple docstring""" __lowerCamelCase = get_strided_contexts_and_ans( UpperCamelCase__ , UpperCamelCase__ , doc_stride=UpperCamelCase__ , max_length=UpperCamelCase__ , assertion=UpperCamelCase__ , ) return example def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ) -> List[Any]: """simple docstring""" with jsonlines.open(UpperCamelCase__ , 'a' ) as writer: for example in tqdm(UpperCamelCase__ , total=len(UpperCamelCase__ ) , desc='Saving samples ... ' ): __lowerCamelCase = example['labels'] for ids, start, end, cat in zip( example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { 'input_ids': ids, 'start_token': start, 'end_token': end, 'category': CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer __A = load_dataset("natural_questions") __A = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base") __A = data["train" if PROCESS_TRAIN == "true" else "validation"] __A = { "tokenizer": tokenizer, "doc_stride": DOC_STRIDE, "max_length": MAX_LENGTH, "assertion": False, } __A = data.map(prepare_inputs, fn_kwargs=fn_kwargs) __A = data.remove_columns(["annotations", "document", "id", "question"]) print(data) np.random.seed(SEED) __A = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl" save_to_disk(data, file_name=cache_file_name)
365
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check that mT5-small reproduces a reference log-likelihood score."""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
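# A short sketch of the score recovery in the test above: the model returns the
# mean cross-entropy over the label tokens (assuming none are ignored), so
# multiplying by the label length and negating recovers log p(labels | input).
def sequence_log_likelihood(mean_ce_loss: float, num_label_tokens: int) -> float:
    return -(num_label_tokens * mean_ce_loss)


assert sequence_log_likelihood(2.5, 4) == -10.0  # mean loss 2.5 over 4 label tokens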
348
0
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict=False ) -> List[Any]: """simple docstring""" try: __lowerCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __lowerCamelCase = default else: # KEY is set, convert it to True or False. try: __lowerCamelCase = strtobool(UpperCamelCase__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value __A = parse_flag_from_env("RUN_SLOW", default=False) __A = parse_flag_from_env("RUN_REMOTE", default=False) __A = parse_flag_from_env("RUN_LOCAL", default=True) __A = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression __A = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") __A = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") __A = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio __A = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", ) # Beam __A = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), reason="test requires apache-beam and a compatible dill version", ) # Dill-cloudpickle compatibility __A = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows __A = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> int: """simple docstring""" try: import faiss # noqa except ImportError: __lowerCamelCase = unittest.skip('test requires faiss' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : int ) -> Union[str, Any]: """simple docstring""" try: import regex # noqa except ImportError: __lowerCamelCase = unittest.skip('test requires regex' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Optional[Any]: """simple docstring""" try: import elasticsearch # noqa except ImportError: __lowerCamelCase = unittest.skip('test requires elasticsearch' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> Optional[Any]: """simple docstring""" try: import sqlalchemy # noqa except ImportError: __lowerCamelCase = unittest.skip('test requires sqlalchemy' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> Optional[int]: """simple docstring""" if not config.TORCH_AVAILABLE: __lowerCamelCase = unittest.skip('test requires PyTorch' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Union[str, Any]: """simple docstring""" if not config.TF_AVAILABLE: __lowerCamelCase = 
unittest.skip('test requires TensorFlow' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> Optional[Any]: """simple docstring""" if not config.JAX_AVAILABLE: __lowerCamelCase = unittest.skip('test requires JAX' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Tuple: """simple docstring""" if not config.PIL_AVAILABLE: __lowerCamelCase = unittest.skip('test requires Pillow' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ) -> Dict: """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(UpperCamelCase__ ) else: return test_case def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> Any: """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(UpperCamelCase__ ) else: return test_case def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> List[Any]: """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(UpperCamelCase__ ) else: return test_case def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Any: """simple docstring""" def _require_spacy_model(UpperCamelCase__ : Optional[int] ): try: import spacy # noqa F401 spacy.load(UpperCamelCase__ ) except ImportError: return unittest.skip('test requires spacy' )(UpperCamelCase__ ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(UpperCamelCase__ ) )(UpperCamelCase__ ) else: return test_case return _require_spacy_model def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> Optional[Any]: """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(UpperCamelCase__ ) else: return test_case def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Optional[int]: """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(UpperCamelCase__ ) else: return test_case def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]: """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: __lowerCamelCase = unittest.skip('test is slow' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Any: """simple docstring""" if not _run_local_tests or _run_local_tests == 0: __lowerCamelCase = unittest.skip('test is local' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> int: """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: __lowerCamelCase = unittest.skip('test is packaged' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple: """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: __lowerCamelCase = unittest.skip('test requires remote' )(UpperCamelCase__ ) return test_case def lowerCamelCase_ ( *UpperCamelCase__ : Optional[Any] ) -> List[str]: """simple docstring""" def decorate(cls : Any ): for name, fn in cls.__dict__.items(): if callable(UpperCamelCase__ ) and name.startswith('test' ): for decorator in decorators: __lowerCamelCase = decorator(UpperCamelCase__ ) setattr(cls , UpperCamelCase__ , UpperCamelCase__ ) return cls return decorate class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" pass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" 
snake_case_ = 0 snake_case_ = 1 snake_case_ = 2 @contextmanager def lowerCamelCase_ ( UpperCamelCase__ : int=OfflineSimulationMode.CONNECTION_FAILS , UpperCamelCase__ : Optional[Any]=1E-16 ) -> List[str]: """simple docstring""" __lowerCamelCase = requests.Session().request def timeout_request(UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int] ): # Change the url to an invalid url so that the connection hangs __lowerCamelCase = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" ) __lowerCamelCase = timeout try: return online_request(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __lowerCamelCase = url __lowerCamelCase = e.args[0] __lowerCamelCase = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),) __lowerCamelCase = (max_retry_error,) raise def raise_connection_error(UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[Any] ): raise requests.ConnectionError('Offline mode is enabled.' , request=UpperCamelCase__ ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , UpperCamelCase__ ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , UpperCamelCase__ ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , UpperCamelCase__ ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def lowerCamelCase_ ( *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> List[str]: """simple docstring""" __lowerCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*UpperCamelCase__ , **UpperCamelCase__ ) as tmp_dir: try: os.chdir(UpperCamelCase__ ) yield finally: os.chdir(UpperCamelCase__ ) @contextmanager def lowerCamelCase_ ( ) -> Tuple: """simple docstring""" import gc gc.collect() __lowerCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowerCamelCase_ ( ) -> Union[str, Any]: """simple docstring""" import gc gc.collect() __lowerCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Optional[int]: """simple docstring""" return deepcopy(UpperCamelCase__ ).integers(0 , 100 , 10 ).tolist() == deepcopy(UpperCamelCase__ ).integers(0 , 100 , 10 ).tolist() def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Optional[int]: """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(UpperCamelCase__ : List[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Dict ): try: return func(*UpperCamelCase__ , **UpperCamelCase__ ) except HTTPError as err: if str(UpperCamelCase__ ).startswith('500' ) or str(UpperCamelCase__ ).startswith('502' ): pytest.xfail(str(UpperCamelCase__ ) ) raise err return decorator.decorator(_wrapper , UpperCamelCase__ ) class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = returncode __lowerCamelCase = stdout __lowerCamelCase = stderr async def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> List[str]: """simple docstring""" while True: __lowerCamelCase = await stream.readline() if line: callback(UpperCamelCase__ ) else: break async def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : List[Any]=False ) -> _RunOutput: """simple docstring""" if echo: print('\nRunning: ' , ' '.join(UpperCamelCase__ ) ) __lowerCamelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=UpperCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=UpperCamelCase__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __lowerCamelCase = [] __lowerCamelCase = [] def tee(UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict="" ): __lowerCamelCase = line.decode('utf-8' ).rstrip() sink.append(UpperCamelCase__ ) if not quiet: print(UpperCamelCase__ , UpperCamelCase__ , file=UpperCamelCase__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda UpperCamelCase__ : tee(UpperCamelCase__ , UpperCamelCase__ , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda UpperCamelCase__ : tee(UpperCamelCase__ , UpperCamelCase__ , sys.stderr , label='stderr:' ) ), ] , timeout=UpperCamelCase__ , ) return _RunOutput(await p.wait() , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=180 , UpperCamelCase__ : str=False , UpperCamelCase__ : Dict=True ) -> _RunOutput: """simple docstring""" __lowerCamelCase = asyncio.get_event_loop() __lowerCamelCase = loop.run_until_complete( _stream_subprocess(UpperCamelCase__ , env=UpperCamelCase__ , stdin=UpperCamelCase__ , timeout=UpperCamelCase__ , quiet=UpperCamelCase__ , echo=UpperCamelCase__ ) ) __lowerCamelCase = ' '.join(UpperCamelCase__ ) if result.returncode > 0: __lowerCamelCase = '\n'.join(result.stderr ) raise RuntimeError( F"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F"""'{cmd_str}' produced no output.""" ) return result def lowerCamelCase_ ( ) -> Tuple: """simple docstring""" __lowerCamelCase = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) __lowerCamelCase = re.sub(R'^gw' , '' , UpperCamelCase__ , 0 , re.M ) return int(UpperCamelCase__ ) def lowerCamelCase_ ( ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = 2_9500 __lowerCamelCase = pytest_xdist_worker_id() return port + uniq_delta
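# A hedged usage sketch of the synchronous runner defined above (exposed as
# `execute_subprocess_async` in the upstream testing utilities; that name is
# assumed here). It streams the child's stdout/stderr live as lists of decoded
# lines and raises on a non-zero exit code or on completely empty output.
cmd = ["python", "-c", "print('hello from child')"]
result = execute_subprocess_async(cmd, timeout=30, quiet=False, echo=True)
assert result.returncode == 0
assert result.stdout[-1] == "hello from child"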
366
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Any: """simple docstring""" __lowerCamelCase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] __lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False __lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False __lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowerCamelCase = [3, 3, 3, 3] __lowerCamelCase = [5, 5, 5, 5] elif "fl4" in model_name: __lowerCamelCase = [4, 4, 4, 4] __lowerCamelCase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowerCamelCase = [3, 3, 3, 3] if "lrf" in model_name: __lowerCamelCase = [3, 3, 3, 3] else: __lowerCamelCase = [2, 2, 2, 2] if "tiny" in model_name: __lowerCamelCase = 96 elif "small" in model_name: __lowerCamelCase = 96 elif "base" in model_name: __lowerCamelCase = 128 elif "large" in model_name: __lowerCamelCase = 192 elif "xlarge" in model_name: __lowerCamelCase = 256 elif "huge" in model_name: __lowerCamelCase = 352 # set label information __lowerCamelCase = 'huggingface/label-files' if "large" in model_name or "huge" in model_name: __lowerCamelCase = 'imagenet-22k-id2label.json' else: __lowerCamelCase = 'imagenet-1k-id2label.json' __lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) __lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = FocalNetConfig( embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , ) return config def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> str: """simple docstring""" if "patch_embed.proj" in name: __lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: __lowerCamelCase = 'encoder.' 
+ name if "encoder.layers" in name: __lowerCamelCase = name.replace('encoder.layers' , 'encoder.stages' ) if "downsample.proj" in name: __lowerCamelCase = name.replace('downsample.proj' , 'downsample.projection' ) if "blocks" in name: __lowerCamelCase = name.replace('blocks' , 'layers' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowerCamelCase = name.replace('modulation.f' , 'modulation.projection_in' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowerCamelCase = name.replace('modulation.h' , 'modulation.projection_context' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowerCamelCase = name.replace('modulation.proj' , 'modulation.projection_out' ) if name == "norm.weight": __lowerCamelCase = 'layernorm.weight' if name == "norm.bias": __lowerCamelCase = 'layernorm.bias' if "head" in name: __lowerCamelCase = name.replace('head' , 'classifier' ) else: __lowerCamelCase = 'focalnet.' + name return name def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Dict: """simple docstring""" __lowerCamelCase = { 'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth', 'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth', 'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth', 'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth', 'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth', 'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth', 'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth', 'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth', 'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth', 'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth', } # fmt: on __lowerCamelCase = model_name_to_url[model_name] print('Checkpoint URL: ' , UpperCamelCase__ ) __lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['model'] # rename keys for key in state_dict.copy().keys(): __lowerCamelCase = state_dict.pop(UpperCamelCase__ ) __lowerCamelCase = val __lowerCamelCase = get_focalnet_config(UpperCamelCase__ ) __lowerCamelCase = FocalNetForImageClassification(UpperCamelCase__ ) model.eval() # load state dict model.load_state_dict(UpperCamelCase__ ) # verify conversion __lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCamelCase = BitImageProcessor( do_resize=UpperCamelCase__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=224 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , ) __lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) __lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' ) __lowerCamelCase = transforms.Compose( [ transforms.Resize(256 
), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ), ] ) __lowerCamelCase = image_transforms(UpperCamelCase__ ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1E-4 ) __lowerCamelCase = model(**UpperCamelCase__ ) __lowerCamelCase = outputs.logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) print('First values of logits:' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] ) elif model_name == "focalnet-tiny-lrf": __lowerCamelCase = torch.tensor([1.16_69, 0.01_25, -0.16_95] ) elif model_name == "focalnet-small": __lowerCamelCase = torch.tensor([0.49_17, -0.04_30, 0.13_41] ) elif model_name == "focalnet-small-lrf": __lowerCamelCase = torch.tensor([-0.25_88, -0.53_42, -0.23_31] ) elif model_name == "focalnet-base": __lowerCamelCase = torch.tensor([-0.16_55, -0.40_90, -0.17_30] ) elif model_name == "focalnet-base-lrf": __lowerCamelCase = torch.tensor([0.53_06, -0.04_83, -0.39_28] ) assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print(F"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(F"""{model_name}""" ) processor.push_to_hub(F"""{model_name}""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="focalnet-tiny", type=str, help="Name of the FocalNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub.", ) __A = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
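# Illustrative trace (comments only) of one checkpoint key flowing through the
# rename rules above:
#   "layers.0.blocks.1.modulation.f.weight"
#   -> prefixed with "encoder."       : "encoder.layers.0.blocks.1.modulation.f.weight"
#   -> "encoder.layers" -> "encoder.stages"
#   -> "blocks" -> "layers"
#   -> "modulation.f" -> "modulation.projection_in"
#   -> final "focalnet." prefix       : "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"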
348
0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase __A = logging.get_logger(__name__) __A = { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json" ), } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''longformer''' def __init__( self , lowerCamelCase__ = 512 , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 0 , lowerCamelCase__ = 2 , lowerCamelCase__ = 30_522 , lowerCamelCase__ = 768 , lowerCamelCase__ = 12 , lowerCamelCase__ = 12 , lowerCamelCase__ = 3_072 , lowerCamelCase__ = "gelu" , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 512 , lowerCamelCase__ = 2 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1e-12 , lowerCamelCase__ = False , **lowerCamelCase__ , ) -> Tuple: '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase__ , **lowerCamelCase__ ) __lowerCamelCase = attention_window __lowerCamelCase = sep_token_id __lowerCamelCase = bos_token_id __lowerCamelCase = eos_token_id __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = onnx_export class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ = "default" , lowerCamelCase__ = None ) -> int: '''simple docstring''' super().__init__(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = True @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": __lowerCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __lowerCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis), ] ) @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __lowerCamelCase = super().outputs if self.task == "default": __lowerCamelCase = {0: 'batch'} return outputs @property def lowercase_ ( self ) -> float: '''simple docstring''' return 1e-4 @property def lowercase_ ( self ) -> int: '''simple docstring''' return max(super().default_onnx_opset , 14 ) def lowercase_ ( self , 
lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , ) -> Mapping[str, Any]: '''simple docstring''' __lowerCamelCase = super().generate_dummy_inputs( preprocessor=lowerCamelCase__ , batch_size=lowerCamelCase__ , seq_length=lowerCamelCase__ , is_pair=lowerCamelCase__ , framework=lowerCamelCase__ ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly __lowerCamelCase = torch.zeros_like(inputs['input_ids'] ) # make every second token global __lowerCamelCase = 1 return inputs
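# A small sketch of the dummy global-attention pattern built above: Longformer
# takes a 0/1 mask per token, with 1 marking tokens that attend globally; the
# export helper marks every second position as global.
import torch

input_ids = torch.ones(2, 8, dtype=torch.long)       # (batch, sequence)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1                    # every second token attends globally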
367
from __future__ import annotations


def median_of_two_arrays(nums_a: list[float], nums_b: list[float]) -> float:
    """Return the median of the two input arrays taken together."""
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
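# Worked examples for the helper above; these would also serve as doctests for
# the `doctest.testmod()` call in the __main__ block:
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0       # odd combined length
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # even: mean of the middle two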
348
0
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
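# A generic, minimal sketch of the optional-dependency guard used above
# (module and names here are hypothetical): probe for the extras once, then
# expose either the real object or a dummy that fails loudly at call time.
try:
    import torch  # noqa: F401
    _deps_available = True
except ImportError:
    _deps_available = False

if _deps_available:
    def make_pipeline():
        return "real pipeline"
else:
    def make_pipeline():
        raise ImportError("This pipeline requires `pip install torch transformers`.")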
368
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between the energy units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
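# Worked examples for the converter above:
assert energy_conversion("joule", "kilojoule", 1000.0) == 1.0
assert energy_conversion("kilowatthour", "joule", 1.0) == 3_600_000.0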
348
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "OPT_PRETRAINED_MODEL_ARCHIVE_LIST", "OPTForCausalLM", "OPTModel", "OPTPreTrainedModel", "OPTForSequenceClassification", "OPTForQuestionAnswering", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "FlaxOPTForCausalLM", "FlaxOPTModel", "FlaxOPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
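# A minimal, generic sketch of the idea behind `_LazyModule`: a module-level
# __getattr__ (PEP 562) defers the heavy submodule import until an attribute is
# first accessed. This is an illustration meant for a package __init__.py with
# hypothetical submodule names, not the transformers internals.
import importlib

_LAZY_ATTRS = {"OPTModel": ".modeling_opt", "OPTConfig": ".configuration_opt"}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")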
369
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
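# A hedged usage sketch: PipelineTool subclasses are callable, chaining
# encode -> forward -> decode internally (exact invocation details depend on
# the transformers tools/agents version in use).
summarizer = TextSummarizationTool()
print(summarizer("A very long English text that should be condensed into a short summary ..."))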
348
0
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __A = logging.get_logger("transformers.models.speecht5") __A = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __A = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __A = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __A = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __A = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __A = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __A = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __A = { 
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __A = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = [] __A = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" for attribute in key.split('.' ): __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ) if weight_type is not None: __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value elif weight_type == "running_mean": __lowerCamelCase = value elif weight_type == "running_var": __lowerCamelCase = value elif weight_type == "num_batches_tracked": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + ('.' 
+ weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any: """simple docstring""" for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = [] if task == "s2t": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2T __lowerCamelCase = IGNORE_KEYS_S2T elif task == "t2s": __lowerCamelCase = None __lowerCamelCase = MAPPING_T2S __lowerCamelCase = IGNORE_KEYS_T2S elif task == "s2s": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2S __lowerCamelCase = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase__ , UpperCamelCase__ ): logger.info(F"""{name} was ignored""" ) continue __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , ) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: __lowerCamelCase = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2] __lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ ) if "weight_g" in name: __lowerCamelCase = 'weight_g' elif "weight_v" in name: __lowerCamelCase = 'weight_v' elif "bias" in name: __lowerCamelCase = 'bias' elif "weight" in name: __lowerCamelCase = 'weight' elif "running_mean" in name: __lowerCamelCase = 'running_mean' elif "running_var" in name: __lowerCamelCase = 'running_var' elif "num_batches_tracked" in name: __lowerCamelCase = 'num_batches_tracked' else: __lowerCamelCase = None set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) continue if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple: """simple docstring""" __lowerCamelCase = full_name.split('conv_layers.' )[-1] __lowerCamelCase = name.split('.' 
) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCamelCase__ ) @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple: """simple docstring""" if config_path is not None: __lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ ) else: __lowerCamelCase = SpeechTaConfig() if task == "s2t": __lowerCamelCase = config.max_text_positions __lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ ) elif task == "t2s": __lowerCamelCase = 1876 __lowerCamelCase = 600 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ ) elif task == "s2s": __lowerCamelCase = 1876 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: __lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it __lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) __lowerCamelCase = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) __lowerCamelCase = SpeechTaFeatureExtractor() __lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) __lowerCamelCase = torch.load(UpperCamelCase__ ) recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if repo_id: print('Pushing to the hub...' 
) processor.push_to_hub(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __A = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
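# Worked examples (comments show expected results) for the wildcard matching in
# `should_ignore` above: a trailing ".*" matches any suffix, and an infix ".*."
# matches when both the prefix and the suffix occur in the parameter name.
#   should_ignore("encoder.proj.weight", ["encoder.proj"])                        -> True  (plain substring)
#   should_ignore("text_encoder_prenet.pe.alpha", ["text_encoder_prenet.*"])      -> True  (prefix wildcard)
#   should_ignore("encoder.layers.3.norm_k.bias", ["encoder.layers.*.norm_k.bias"]) -> True  (infix wildcard)
#   should_ignore("decoder.layers.0.fc1.weight", ["encoder.proj"])                -> False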
370
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_choices def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_attention_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 
config_and_inputs __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = True snake_case_ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) __lowerCamelCase = model(lowerCamelCase__ )[0] __lowerCamelCase = [1, 11, 50_265] self.assertEqual(list(output.shape ) , lowerCamelCase__ ) # compare the actual values for a slice. __lowerCamelCase = np.array( [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ ) __lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) __lowerCamelCase = model(lowerCamelCase__ )[0] # compare the actual values for a slice. __lowerCamelCase = np.array( [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
348
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __lowerCAmelCase ( unittest.TestCase ): def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on __lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) __lowerCamelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] __lowerCamelCase = {'unk_token': '<unk>'} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCamelCase__ ) ) __lowerCamelCase = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } __lowerCamelCase = os.path.join(self.tmpdirname , lowerCamelCase__ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self , **lowerCamelCase__ ) -> Tuple: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def lowercase_ ( self , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def lowercase_ ( self , **lowerCamelCase__ ) -> Tuple: '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def lowercase_ ( self ) -> int: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(lowerCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase__ ) __lowerCamelCase = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , 
tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase__ ) self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowerCamelCase__ ) self.assertIsInstance(processor_fast.image_processor , lowerCamelCase__ ) def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __lowerCamelCase = self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 ) __lowerCamelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCamelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='np' ) __lowerCamelCase = processor(images=lowerCamelCase__ , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __lowerCamelCase = 'lower newer' __lowerCamelCase = processor(text=lowerCamelCase__ ) __lowerCamelCase = tokenizer(lowerCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __lowerCamelCase = 'lower newer' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=lowerCamelCase__ , images=lowerCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase__ ): processor() def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(lowerCamelCase__ ) __lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Union[str, Any]: 
'''simple docstring''' __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __lowerCamelCase = 'lower newer' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=lowerCamelCase__ , images=lowerCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
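# A minimal usage sketch of what the processor tests above exercise. The
# checkpoint name below is an assumption (the public
# "openai/clip-vit-base-patch32" weights, which the tests themselves do not
# use): the processor bundles the CLIP tokenizer and image processor behind a
# single call.
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
dummy_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
clip_inputs = clip_processor(
    text=["a photo of a cat"], images=dummy_image, return_tensors="np", padding=True
)
print(sorted(clip_inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']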
371
from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: 
'''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> 
Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] )
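# The placeholder classes above let `from transformers import ...` succeed
# when sentencepiece is not installed and defer the failure to first use. A
# simplified, self-contained sketch of the mechanism (the real DummyObject and
# requires_backends live in transformers.utils; the stand-ins below only
# illustrate the idea):
class _DummyObject(type):
    # instantiating any class built on this metaclass raises a clear error
    # instead of failing at import time
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the `sentencepiece` library.")


class _SomeSentencePieceTokenizer(metaclass=_DummyObject):
    pass


try:
    _SomeSentencePieceTokenizer()
except ImportError as err:
    print(err)  # _SomeSentencePieceTokenizer requires the `sentencepiece` library.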
348
0
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = MgpstrTokenizer snake_case_ = False snake_case_ = {} snake_case_ = False def lowercase_ ( self ) -> List[Any]: '''simple docstring''' super().setUp() # fmt: off __lowerCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on __lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + '\n' ) def lowercase_ ( self , **lowerCamelCase__ ) -> str: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = 'tester' __lowerCamelCase = 'tester' return input_text, output_text @unittest.skip('MGP-STR always lower cases letters.' ) def lowercase_ ( self ) -> int: '''simple docstring''' pass def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowerCamelCase = '[SPECIAL_TOKEN]' tokenizer.add_special_tokens({'cls_token': special_token} ) __lowerCamelCase = tokenizer.encode([special_token] , add_special_tokens=lowerCamelCase__ ) self.assertEqual(len(lowerCamelCase__ ) , 1 ) __lowerCamelCase = tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ ) self.assertTrue(special_token not in decoded ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowerCamelCase , __lowerCamelCase = self.get_input_output_texts(lowerCamelCase__ ) __lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) __lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertNotEqual(len(lowerCamelCase__ ) , 0 ) __lowerCamelCase = tokenizer.decode(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(text_a.replace(' ' , '' ) , lowerCamelCase__ ) @unittest.skip('MGP-STR tokenizer only handles one sequence.' ) def lowercase_ ( self ) -> str: '''simple docstring''' pass @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' pass
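# A quick round-trip sketch for the character-level tokenizer tested above.
# The checkpoint name is an assumption (the public "alibaba-damo/mgp-str-base"
# weights; the tests build their vocabulary locally instead):
from transformers import MgpstrTokenizer

mgp_tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
mgp_ids = mgp_tok("tester")["input_ids"]
print(mgp_ids)                  # one id per character
print(mgp_tok.decode(mgp_ids))  # "tester"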
350
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowercase_ ( self , lowerCamelCase__=0 ) -> int: '''simple docstring''' __lowerCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) ) __lowerCamelCase = np.random.RandomState(lowerCamelCase__ ) __lowerCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) # warmup pass to apply optimizations __lowerCamelCase = pipe(**self.get_dummy_inputs() ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) 
pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = self.get_dummy_inputs() __lowerCamelCase = pipe(**lowerCamelCase__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def lowercase_ ( self ) -> int: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = ort.SessionOptions() __lowerCamelCase = False return options def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __lowerCamelCase = init_image.resize((768, 512) ) # using the PNDM scheduler by default __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'A fantasy landscape, trending on artstation' __lowerCamelCase = np.random.RandomState(0 ) __lowerCamelCase = pipe( prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images __lowerCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __lowerCamelCase = init_image.resize((768, 512) ) __lowerCamelCase = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' ) __lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'A fantasy landscape, trending on artstation' __lowerCamelCase = np.random.RandomState(0 ) __lowerCamelCase = pipe( prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images __lowerCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
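# The fast tests above reduce to the sketch below. Note that "ImgaImg" in the
# dump is a mangled "Img2Img" (digits are rewritten to letters throughout this
# document), so the actual diffusers class is
# OnnxStableDiffusionImg2ImgPipeline.
import numpy as np
from PIL import Image
from diffusers import OnnxStableDiffusionImg2ImgPipeline

onnx_pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
    provider="CPUExecutionProvider",
)
init_image = Image.fromarray(np.zeros((128, 128, 3), dtype=np.uint8))
onnx_out = onnx_pipe(
    prompt="A painting of a squirrel eating a burger",
    image=init_image,
    strength=0.75,
    num_inference_steps=3,
    output_type="np",
)
print(onnx_out.images.shape)  # (1, 128, 128, 3), as asserted above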
348
0
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart __A = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } __A = { "facebook/bart-base": 10_24, "facebook/bart-large": 10_24, "facebook/bart-large-mnli": 10_24, "facebook/bart-large-cnn": 10_24, "facebook/bart-large-xsum": 10_24, "yjernite/bart_eli5": 10_24, } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ['''input_ids''', '''attention_mask'''] snake_case_ = BartTokenizer def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , lowerCamelCase__=True , **lowerCamelCase__ , ) -> Any: '''simple docstring''' super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , 
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) __lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , lowerCamelCase__ ) != add_prefix_space: __lowerCamelCase = getattr(lowerCamelCase__ , pre_tok_state.pop('type' ) ) __lowerCamelCase = add_prefix_space __lowerCamelCase = pre_tok_class(**lowerCamelCase__ ) __lowerCamelCase = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` __lowerCamelCase = 'post_processor' __lowerCamelCase = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: __lowerCamelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __lowerCamelCase = tuple(state['sep'] ) if "cls" in state: __lowerCamelCase = tuple(state['cls'] ) __lowerCamelCase = False if state.get('add_prefix_space' , lowerCamelCase__ ) != add_prefix_space: __lowerCamelCase = add_prefix_space __lowerCamelCase = True if state.get('trim_offsets' , lowerCamelCase__ ) != trim_offsets: __lowerCamelCase = trim_offsets __lowerCamelCase = True if changes_to_apply: __lowerCamelCase = getattr(lowerCamelCase__ , state.pop('type' ) ) __lowerCamelCase = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def lowercase_ ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value __lowerCamelCase = value def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> BatchEncoding: '''simple docstring''' __lowerCamelCase = kwargs.get('is_split_into_words' , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> BatchEncoding: '''simple docstring''' __lowerCamelCase = kwargs.get('is_split_into_words' , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' __lowerCamelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> Tuple: '''simple docstring''' __lowerCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]: '''simple docstring''' __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
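# A short sketch of the special-token layout produced by the method above
# that prepends bos and appends eos, assuming the public "facebook/bart-base"
# checkpoint: a single sequence becomes <s> A </s>, a pair becomes
# <s> A </s></s> B </s>.
from transformers import BartTokenizerFast

bart_tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
print(bart_tok("Hello")["input_ids"])           # [0, ..., 2]
print(bart_tok("Hello", "world")["input_ids"])  # [0, ..., 2, 2, ..., 2]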
351
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __A = logging.get_logger(__name__) __A = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85, 7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77, 13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11, 46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86, 1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91, 1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09, 3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61 ] __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73, 8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27, 32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47, 72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93, 1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75, 2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65, 4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62 ] class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''whisper''' snake_case_ = ['''past_key_values'''] snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str: '''simple docstring''' __lowerCamelCase = vocab_size __lowerCamelCase = num_mel_bins __lowerCamelCase = d_model __lowerCamelCase = encoder_layers __lowerCamelCase = encoder_attention_heads __lowerCamelCase = decoder_layers __lowerCamelCase = decoder_attention_heads __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = encoder_ffn_dim __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = encoder_layerdrop __lowerCamelCase = decoder_layerdrop __lowerCamelCase = 
use_cache __lowerCamelCase = encoder_layers __lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __lowerCamelCase = max_source_positions __lowerCamelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. __lowerCamelCase = classifier_proj_size __lowerCamelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks __lowerCamelCase = median_filter_width super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __lowerCamelCase = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: __lowerCamelCase = {0: 'batch'} else: __lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' ) return common_inputs def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]: '''simple docstring''' __lowerCamelCase = OrderedDict() __lowerCamelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , ) __lowerCamelCase = encoder_inputs['input_features'].shape[2] __lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length __lowerCamelCase = super().generate_dummy_inputs( preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = encoder_inputs.pop('input_features' ) __lowerCamelCase = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: __lowerCamelCase = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def lowercase_ ( self ) -> float: '''simple docstring''' return 1e-3
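# A minimal sketch: the defaults in __init__ above can be checked directly by
# instantiating the config with no arguments.
from transformers import WhisperConfig

whisper_config = WhisperConfig()
print(
    whisper_config.vocab_size,            # 51865
    whisper_config.num_mel_bins,          # 80
    whisper_config.max_source_positions,  # 1500
    whisper_config.max_target_positions,  # 448
)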
348
0
def sum_of_digits(n: int) -> int:
    """simple docstring"""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """simple docstring"""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """simple docstring"""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
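# A quick agreement check, runnable directly after the definitions above: the
# three implementations must return the same digit sum for any integer.
for _value in (0, -7, 262144, 1125899906842624):
    assert (
        sum_of_digits(_value)
        == sum_of_digits_recursion(_value)
        == sum_of_digits_compact(_value)
    )
print(sum_of_digits(262144))  # 19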
352
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = rotary_dim __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = initializer_range __lowerCamelCase = None __lowerCamelCase = vocab_size - 1 __lowerCamelCase = vocab_size - 1 __lowerCamelCase = vocab_size - 1 def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs __lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = 20 __lowerCamelCase = model_class_name(lowerCamelCase__ ) __lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ ) __lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) __lowerCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCamelCase = model( input_ids[:, :-1] , attention_mask=lowerCamelCase__ , 
past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCamelCase = model( input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , ) __lowerCamelCase = model(lowerCamelCase__ ) __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = 20 __lowerCamelCase = model_class_name(lowerCamelCase__ ) __lowerCamelCase = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ ) __lowerCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCamelCase = model( input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCamelCase = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , ) __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) __lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = FlaxGPTJModelTester(self ) def lowercase_ ( self ) -> str: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) @tooslow def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) __lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ ) __lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCamelCase = False __lowerCamelCase = model.config.eos_token_id __lowerCamelCase = jax.jit(model.generate ) __lowerCamelCase = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences __lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , 
skip_special_tokens=lowerCamelCase__ ) __lowerCamelCase = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @is_pt_flax_cross_test def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape __lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase__ ): __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval() __lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa ) __lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ ) __lowerCamelCase = fx_state with torch.no_grad(): __lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple() __lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase__ ) __lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ ) __lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple() self.assertEqual( len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval() __lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa ) __lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params ) __lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape __lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase__ ): __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 0 __lowerCamelCase = 1 # make sure weights are tied in 
PyTorch pt_model.tie_weights() with torch.no_grad(): __lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple() __lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase__ ) __lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ ) with torch.no_grad(): __lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple() self.assertEqual( len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowercase_ ( self ) -> List[str]: '''simple docstring''' for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ )
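# The PT<->Flax cross tests above boil down to the following sketch, using a
# tiny randomly initialized config (sizes mirror the model tester) instead of
# the 6B checkpoint:
import numpy as np
import torch
from transformers import FlaxGPTJModel, GPTJConfig, GPTJModel
from transformers.modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax

tiny_config = GPTJConfig(
    vocab_size=99, n_embd=32, n_layer=4, n_head=4, n_positions=512, rotary_dim=4
)
pt_model = GPTJModel(tiny_config).eval()
fx_model = FlaxGPTJModel(tiny_config)
fx_model.params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)

ids = np.ones((1, 8), dtype="i4")
with torch.no_grad():
    pt_out = pt_model(torch.tensor(ids, dtype=torch.long)).last_hidden_state.numpy()
fx_out = np.asarray(fx_model(ids).last_hidden_state)
print(np.abs(pt_out - fx_out).max())  # tiny, well under the 4e-2 tolerance above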
348
0
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """simple docstring"""
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
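# A second usage example, runnable after the definition above: on a 4-vertex
# path graph the heuristic happens to return the optimal cover {1, 2}; in
# general, greedy max-degree only approximates the minimum vertex cover.
print(greedy_min_vertex_cover({0: [1], 1: [0, 2], 2: [1, 3], 3: [2]}))  # {1, 2}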
353
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu __A = False class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase_ ( self ) -> int: '''simple docstring''' return 12 @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' return 12 @property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return 32 @property def lowercase_ ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(lowerCamelCase__ ) @property def lowercase_ ( self ) -> str: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = 12 __lowerCamelCase = 12 __lowerCamelCase = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } __lowerCamelCase = TransformeraDModel(**lowerCamelCase__ ) return model def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.dummy_vqvae __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_transformer __lowerCamelCase = VQDiffusionScheduler(self.num_embed ) __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase__ ) __lowerCamelCase = VQDiffusionPipeline( vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'teddy bear playing in the pool' __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' ) __lowerCamelCase = output.images __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe( [prompt] , generator=lowerCamelCase__ , 
output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowerCamelCase = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.dummy_vqvae __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_transformer __lowerCamelCase = VQDiffusionScheduler(self.num_embed ) __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings( learnable=lowerCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __lowerCamelCase = VQDiffusionPipeline( vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'teddy bear playing in the pool' __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' ) __lowerCamelCase = output.images __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipe( [prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowerCamelCase = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) __lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) __lowerCamelCase = pipeline.to(lowerCamelCase__ ) pipeline.set_progress_bar_config(disable=lowerCamelCase__ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __lowerCamelCase = pipeline( 'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowerCamelCase__ , output_type='np' , ) __lowerCamelCase = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
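# The slow integration test above reduces to the sketch below, mirroring the
# test's call (the checkpoint name and prompt come from the test). Without a
# GPU it still runs, just very slowly, and pixels will be close but not
# bit-identical across hardware.
import torch
from diffusers import VQDiffusionPipeline

vq_pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
vq_generator = torch.Generator().manual_seed(0)
vq_image = vq_pipe(
    "teddy bear playing in the pool",
    num_images_per_prompt=1,
    generator=vq_generator,
    output_type="np",
).images[0]
print(vq_image.shape)  # (256, 256, 3)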
348
0
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    # exact GELU: x * Phi(x), where Phi is the standard normal CDF (via erf)
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    # smoother tanh approximation of GELU
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    # GELU with outputs clipped to [-10, 10]
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    # Gated Linear Unit: split the input in two halves along `axis`, a * sigmoid(b)
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
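# Illustrative sketch, not part of the original module: minimal usage of the
# ACT2FN registry above — resolve an activation by name and apply it
# elementwise. Assumes only the definitions above plus an installed TensorFlow.
if __name__ == "__main__":
    act = get_tf_activation("gelu_fast")
    print(act(tf.constant([-1.0, 0.0, 1.0])).numpy())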
354
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = is_training __lowerCamelCase = use_auxiliary_loss __lowerCamelCase = num_queries __lowerCamelCase = num_channels __lowerCamelCase = min_size __lowerCamelCase = max_size __lowerCamelCase = num_labels __lowerCamelCase = hidden_dim __lowerCamelCase = hidden_dim def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCamelCase__ ) __lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ ) __lowerCamelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5 ).float() __lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long() __lowerCamelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerConfig( hidden_size=self.hidden_dim , ) __lowerCamelCase = self.num_queries __lowerCamelCase = self.num_labels __lowerCamelCase = [1, 1, 1, 1] __lowerCamelCase = self.num_channels __lowerCamelCase = 64 __lowerCamelCase = 128 __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim return config def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = output.encoder_hidden_states __lowerCamelCase = output.pixel_decoder_hidden_states __lowerCamelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple: '''simple docstring''' with torch.no_grad(): __lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) 
model.eval() __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() def comm_check_on_output(lowerCamelCase__ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) __lowerCamelCase = model( pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = MaskaFormerModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def lowercase_ ( self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def lowercase_ ( self ) 
-> Tuple: '''simple docstring''' pass @unittest.skip(reason='Mask2Former is not a generative model' ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def lowercase_ ( self ) -> Dict: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' pass def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: __lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = (self.model_tester.min_size,) * 2 __lowerCamelCase = { 'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ), 'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ), 'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(), } __lowerCamelCase = self.model_tester.get_config() __lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' if not self.model_tester.is_training: return __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss loss.backward() def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = 
model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) __lowerCamelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __lowerCamelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCamelCase__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" __lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def lowercase_ ( self ) -> Dict: '''simple docstring''' return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) __lowerCamelCase = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) # masks_queries_logits 
__lowerCamelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) __lowerCamelCase = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] __lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) # class_queries_logits __lowerCamelCase = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) __lowerCamelCase = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) __lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ ) __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']] __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']] with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None )
348
0
def solution(pence: int = 200) -> int:
    """Count the ways `pence` pence can be made from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
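# Illustrative sanity checks, not part of the original solution: putting the
# coins in the outer loop counts each multiset of coins exactly once, so
# combinations are not double-counted as permutations.
assert solution(5) == 4  # 5 = 5 = 2+2+1 = 2+1+1+1 = 1+1+1+1+1
assert solution(10) == 11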
355
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A = { "facebook/mask2former-swin-small-coco-instance": ( "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } __A = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''mask2former''' snake_case_ = ['''swin'''] snake_case_ = {'''hidden_size''': '''hidden_dim'''} def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1_024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2_048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12_544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple: '''simple docstring''' if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) __lowerCamelCase = CONFIG_MAPPING['swin']( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase = backbone_config.pop('model_type' ) __lowerCamelCase = CONFIG_MAPPING[backbone_model_type] __lowerCamelCase = config_class.from_dict(lowerCamelCase__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {','.join(self.backbones_supported )}""" ) __lowerCamelCase = backbone_config __lowerCamelCase = feature_size __lowerCamelCase = mask_feature_size __lowerCamelCase = hidden_dim __lowerCamelCase = encoder_feedforward_dim __lowerCamelCase = activation_function __lowerCamelCase = encoder_layers __lowerCamelCase = decoder_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = dropout __lowerCamelCase = dim_feedforward __lowerCamelCase = pre_norm __lowerCamelCase = enforce_input_projection __lowerCamelCase = common_stride __lowerCamelCase = ignore_value __lowerCamelCase = num_queries __lowerCamelCase = no_object_weight __lowerCamelCase = class_weight __lowerCamelCase = mask_weight __lowerCamelCase = dice_weight __lowerCamelCase = train_num_points __lowerCamelCase = oversample_ratio __lowerCamelCase = importance_sample_ratio __lowerCamelCase = init_std __lowerCamelCase = init_xavier_std __lowerCamelCase = use_auxiliary_loss __lowerCamelCase = feature_strides __lowerCamelCase = output_auxiliary_logits __lowerCamelCase = decoder_layers super().__init__(**lowerCamelCase__ ) @classmethod def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' return cls( backbone_config=lowerCamelCase__ , **lowerCamelCase__ , ) def lowercase_ ( self ) -> Dict[str, any]: '''simple docstring''' __lowerCamelCase = copy.deepcopy(self.__dict__ ) __lowerCamelCase = self.backbone_config.to_dict() __lowerCamelCase = self.__class__.model_type return output
348
0
"""simple docstring""" import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = (KDPMaDiscreteScheduler,) snake_case_ = 10 def lowercase_ ( self , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = { 'num_train_timesteps': 1_100, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**lowerCamelCase__ ) return config def lowercase_ ( self ) -> List[str]: '''simple docstring''' for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=lowerCamelCase__ ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ ) def lowercase_ ( self ) -> List[Any]: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCamelCase__ ) def lowercase_ ( self ) -> int: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase__ ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config(prediction_type='v_prediction' ) __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __lowerCamelCase = sample.to(lowerCamelCase__ ) for i, t in enumerate(scheduler.timesteps ): __lowerCamelCase = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = output.prev_sample __lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) ) __lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2 assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2 assert abs(result_mean.item() - 0.00_02 ) < 1e-3 def lowercase_ ( self ) -> List[Any]: '''simple docstring''' if torch_device == "mps": return __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __lowerCamelCase = sample.to(lowerCamelCase__ ) for i, t in enumerate(scheduler.timesteps ): __lowerCamelCase = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = output.prev_sample __lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) ) __lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert 
abs(result_mean.item() - 0.02_66 ) < 1e-3 def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' if torch_device == "mps": return __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase__ ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter.to(lowerCamelCase__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: __lowerCamelCase = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = output.prev_sample __lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) ) __lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) ) if str(lowerCamelCase__ ).startswith('cpu' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3
356
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = 42 class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple: '''simple docstring''' super().__init__() __lowerCamelCase = num_attention_heads __lowerCamelCase = attention_head_dim __lowerCamelCase = num_attention_heads * attention_head_dim __lowerCamelCase = additional_embeddings __lowerCamelCase = time_embed_dim or inner_dim __lowerCamelCase = embedding_proj_dim or embedding_dim __lowerCamelCase = clip_embed_dim or embedding_dim __lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 ) __lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) if embedding_proj_norm_type is None: __lowerCamelCase = None elif embedding_proj_norm_type == "layer": __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) else: raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) if encoder_hid_proj_type is None: __lowerCamelCase = None elif encoder_hid_proj_type == "linear": __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) else: raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) ) if added_emb_type == "prd": __lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) ) elif added_emb_type is None: __lowerCamelCase = None else: raise ValueError( f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" ) __lowerCamelCase = nn.ModuleList( [ BasicTransformerBlock( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , ) for d in range(lowerCamelCase__ ) ] ) if norm_in_type == "layer": __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) elif norm_in_type is None: __lowerCamelCase = None else: raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" ) __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 ) causal_attention_mask.triu_(1 ) __lowerCamelCase = causal_attention_mask[None, ...] 
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowercase_ ( self ) -> Dict[str, AttentionProcessor]: '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , 'set_processor' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return processors def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count: raise ValueError( f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the""" f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , 'set_processor' ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): module.set_processor(lowerCamelCase__ ) else: module.set_processor(processor.pop(f"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int: '''simple docstring''' __lowerCamelCase = hidden_states.shape[0] __lowerCamelCase = timestep if not torch.is_tensor(lowerCamelCase__ ): __lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0: __lowerCamelCase = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device ) __lowerCamelCase = self.time_proj(lowerCamelCase__ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype ) __lowerCamelCase = self.time_embedding(lowerCamelCase__ ) if self.embedding_proj_norm is not None: __lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ ) __lowerCamelCase = self.embedding_proj(lowerCamelCase__ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' ) __lowerCamelCase = self.proj_in(lowerCamelCase__ ) __lowerCamelCase = self.positional_embedding.to(hidden_states.dtype ) __lowerCamelCase = [] __lowerCamelCase = 0 if encoder_hidden_states is not None: additional_embeds.append(lowerCamelCase__ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __lowerCamelCase = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __lowerCamelCase = hidden_states[:, None, :] __lowerCamelCase = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 ) additional_embeds.append(lowerCamelCase__ ) __lowerCamelCase = torch.cat( lowerCamelCase__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __lowerCamelCase = F.pad( lowerCamelCase__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __lowerCamelCase = hidden_states + positional_embeddings if attention_mask is not None: __lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0 __lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 ) __lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __lowerCamelCase = self.norm_in(lowerCamelCase__ ) for block in self.transformer_blocks: __lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) __lowerCamelCase = self.norm_out(lowerCamelCase__ ) if self.prd_embedding is not None: __lowerCamelCase = hidden_states[:, -1] else: __lowerCamelCase = hidden_states[:, additional_embeddings_len:] __lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]: '''simple docstring''' __lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
348
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __A = logging.get_logger(__name__) __A = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85, 7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77, 13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11, 46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86, 1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91, 1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09, 3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61 ] __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73, 8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27, 32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47, 72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93, 1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75, 2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65, 4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62 ] class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''whisper''' snake_case_ = ['''past_key_values'''] snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str: '''simple docstring''' __lowerCamelCase = vocab_size __lowerCamelCase = num_mel_bins __lowerCamelCase = d_model __lowerCamelCase = encoder_layers __lowerCamelCase = encoder_attention_heads __lowerCamelCase = decoder_layers __lowerCamelCase = decoder_attention_heads __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = encoder_ffn_dim __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = encoder_layerdrop __lowerCamelCase = decoder_layerdrop __lowerCamelCase = 
use_cache __lowerCamelCase = encoder_layers __lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __lowerCamelCase = max_source_positions __lowerCamelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. __lowerCamelCase = classifier_proj_size __lowerCamelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks __lowerCamelCase = median_filter_width super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __lowerCamelCase = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: __lowerCamelCase = {0: 'batch'} else: __lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' ) return common_inputs def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]: '''simple docstring''' __lowerCamelCase = OrderedDict() __lowerCamelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , ) __lowerCamelCase = encoder_inputs['input_features'].shape[2] __lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length __lowerCamelCase = super().generate_dummy_inputs( preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = encoder_inputs.pop('input_features' ) __lowerCamelCase = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: __lowerCamelCase = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def lowercase_ ( self ) -> float: '''simple docstring''' return 1e-3
357
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # sift the value at index `start` down until the min-heap property holds
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = heap[start], positions[start]
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # update function if the value of any node in the min-heap decreases
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
348
0
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polynomial() -> None:
    """Visualize the polynomial regression fit against the raw data."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
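# Illustrative sketch, not part of the original script: the train/test split
# above is created but never used. Assuming scikit-learn as imported above,
# the degree-4 fit can be scored on the held-out rows like this.
if __name__ == "__main__":
    from sklearn.metrics import r2_score

    y_pred = pol_reg.predict(poly_reg.transform(X_test))
    print("held-out R^2:", r2_score(y_test, y_pred))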
358
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
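# Illustrative sketch, not part of the original module; left as comments
# because the relative imports above only resolve inside the `datasets`
# package. `column_mapping` tells the library how to rename dataset columns;
# the column names "q" and "passage" below are hypothetical.
#
#   template = QuestionAnsweringExtractive(question_column="q", context_column="passage")
#   template.column_mapping == {"q": "question", "passage": "context", "answers": "answers"}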
348
0
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = 42 class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple: '''simple docstring''' super().__init__() __lowerCamelCase = num_attention_heads __lowerCamelCase = attention_head_dim __lowerCamelCase = num_attention_heads * attention_head_dim __lowerCamelCase = additional_embeddings __lowerCamelCase = time_embed_dim or inner_dim __lowerCamelCase = embedding_proj_dim or embedding_dim __lowerCamelCase = clip_embed_dim or embedding_dim __lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 ) __lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) if embedding_proj_norm_type is None: __lowerCamelCase = None elif embedding_proj_norm_type == "layer": __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) else: raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) if encoder_hid_proj_type is None: __lowerCamelCase = None elif encoder_hid_proj_type == "linear": __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) else: raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) ) if added_emb_type == "prd": __lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) ) elif added_emb_type is None: __lowerCamelCase = None else: raise ValueError( f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" ) __lowerCamelCase = nn.ModuleList( [ BasicTransformerBlock( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , ) for d in range(lowerCamelCase__ ) ] ) if norm_in_type == "layer": __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) elif norm_in_type is None: __lowerCamelCase = None else: raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" ) __lowerCamelCase = nn.LayerNorm(lowerCamelCase__ ) __lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 ) causal_attention_mask.triu_(1 ) __lowerCamelCase = causal_attention_mask[None, ...] 
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) __lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowercase_ ( self ) -> Dict[str, AttentionProcessor]: '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , 'set_processor' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return processors def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count: raise ValueError( f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the""" f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , 'set_processor' ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): module.set_processor(lowerCamelCase__ ) else: module.set_processor(processor.pop(f"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int: '''simple docstring''' __lowerCamelCase = hidden_states.shape[0] __lowerCamelCase = timestep if not torch.is_tensor(lowerCamelCase__ ): __lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0: __lowerCamelCase = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device ) __lowerCamelCase = self.time_proj(lowerCamelCase__ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype ) __lowerCamelCase = self.time_embedding(lowerCamelCase__ ) if self.embedding_proj_norm is not None: __lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ ) __lowerCamelCase = self.embedding_proj(lowerCamelCase__ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' ) __lowerCamelCase = self.proj_in(lowerCamelCase__ ) __lowerCamelCase = self.positional_embedding.to(hidden_states.dtype ) __lowerCamelCase = [] __lowerCamelCase = 0 if encoder_hidden_states is not None: additional_embeds.append(lowerCamelCase__ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __lowerCamelCase = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __lowerCamelCase = hidden_states[:, None, :] __lowerCamelCase = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 ) additional_embeds.append(lowerCamelCase__ ) __lowerCamelCase = torch.cat( lowerCamelCase__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __lowerCamelCase = F.pad( lowerCamelCase__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __lowerCamelCase = hidden_states + positional_embeddings if attention_mask is not None: __lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0 __lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 ) __lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __lowerCamelCase = self.norm_in(lowerCamelCase__ ) for block in self.transformer_blocks: __lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) __lowerCamelCase = self.norm_out(lowerCamelCase__ ) if self.prd_embedding is not None: __lowerCamelCase = hidden_states[:, -1] else: __lowerCamelCase = hidden_states[:, additional_embeddings_len:] __lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]: '''simple docstring''' __lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
359
import requests __A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=" def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None: """simple docstring""" __lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page['articles'] , 1 ): print(F"""{i}.) {article['title']}""" ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
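# A minimal usage sketch of the fetch above; the API key is a placeholder, and passing
# `params` is an equivalent (assumed interchangeable) way to append it with `requests`:
import requests

response = requests.get(
    "https://newsapi.org/v1/articles",
    params={"source": "bbc-news", "sortBy": "top", "apiKey": "<your key>"},
)
for i, article in enumerate(response.json().get("articles", []), 1):
    print(f"{i}.) {article['title']}")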
348
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = KandinskyInpaintPipeline snake_case_ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] snake_case_ = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] snake_case_ = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] snake_case_ = False @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' return 32 @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' return 32 @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' return self.time_input_dim @property def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' return self.time_input_dim * 4 @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' return 100 @property def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , ) __lowerCamelCase = MultilingualCLIP(lowerCamelCase__ ) __lowerCamelCase = text_encoder.eval() return text_encoder @property def lowercase_ ( self ) -> str: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __lowerCamelCase = UNetaDConditionModel(**lowerCamelCase__ ) return model @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, 
"out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_unet __lowerCamelCase = self.dummy_movq __lowerCamelCase = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type='epsilon' , thresholding=lowerCamelCase__ , ) __lowerCamelCase = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> str: '''simple docstring''' __lowerCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) __lowerCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCamelCase__ ) # create init_image __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCamelCase = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' ).resize((256, 256) ) # create mask __lowerCamelCase = np.ones((64, 64) , dtype=np.floataa ) __lowerCamelCase = 0 if str(lowerCamelCase__ ).startswith('mps' ): __lowerCamelCase = torch.manual_seed(lowerCamelCase__ ) else: __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) __lowerCamelCase = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**lowerCamelCase__ ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) ) __lowerCamelCase = output.images __lowerCamelCase = pipe( **self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] print(f"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Optional[int]: 
'''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __lowerCamelCase = np.ones((768, 768) , dtype=np.floataa ) __lowerCamelCase = 0 __lowerCamelCase = 'a hat' __lowerCamelCase = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa ) pipe_prior.to(lowerCamelCase__ ) __lowerCamelCase = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa ) __lowerCamelCase = pipeline.to(lowerCamelCase__ ) pipeline.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = torch.Generator(device='cpu' ).manual_seed(0 ) __lowerCamelCase , __lowerCamelCase = pipe_prior( lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() __lowerCamelCase = pipeline( lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) __lowerCamelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
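# A small sketch of the device-aware seeding pattern used in `get_dummy_inputs` above:
# `mps` backends take the global `torch.manual_seed`, other devices accept a per-device
# Generator (the helper name below is hypothetical, mirroring the branch in the tests).
import torch

def make_generator(device: str, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
noise = torch.randn(1, 4, 64, 64, generator=gen)  # reproducible across runs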
360
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __A = logging.get_logger(__name__) __A = TypeVar("DatasetType", Dataset, IterableDataset) def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[List[float]] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType: """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase__ ): if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" ) if i == 0: __lowerCamelCase , __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ ) else: return _interleave_iterable_datasets( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ ) def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : int = 0 , ) -> DatasetType: """simple docstring""" if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase__ ): if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' 
) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" ) if i == 0: __lowerCamelCase , __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ ) else: return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
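# A minimal usage sketch of the two entry points above, assuming they are exposed under
# their public `datasets` names, `interleave_datasets` and `concatenate_datasets`:
from datasets import Dataset, concatenate_datasets, interleave_datasets

ds_a = Dataset.from_dict({"text": ["a", "b"]})
ds_b = Dataset.from_dict({"text": ["c", "d"]})

mixed = interleave_datasets(
    [ds_a, ds_b], probabilities=[0.7, 0.3], seed=42, stopping_strategy="all_exhausted"
)
joined = concatenate_datasets([ds_a, ds_b])  # rows of ds_a followed by rows of ds_b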
348
0
"""simple docstring""" def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str: """simple docstring""" if number < 0 or shift_amount < 0: raise ValueError('both inputs must be positive integers' ) __lowerCamelCase = str(bin(UpperCamelCase__ ) ) binary_number += "0" * shift_amount return binary_number def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str: """simple docstring""" if number < 0 or shift_amount < 0: raise ValueError('both inputs must be positive integers' ) __lowerCamelCase = str(bin(UpperCamelCase__ ) )[2:] if shift_amount >= len(UpperCamelCase__ ): return "0b0" __lowerCamelCase = binary_number[: len(UpperCamelCase__ ) - shift_amount] return "0b" + shifted_binary_number def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str: """simple docstring""" if number >= 0: # Get binary representation of positive number __lowerCamelCase = '0' + str(bin(UpperCamelCase__ ) ).strip('-' )[2:] else: # Get binary (2's complement) representation of negative number __lowerCamelCase = len(bin(UpperCamelCase__ )[3:] ) # Find 2's complement of number __lowerCamelCase = bin(abs(UpperCamelCase__ ) - (1 << binary_number_length) )[3:] __lowerCamelCase = ( '1' + '0' * (binary_number_length - len(UpperCamelCase__ )) + binary_number ) if shift_amount >= len(UpperCamelCase__ ): return "0b" + binary_number[0] * len(UpperCamelCase__ ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(UpperCamelCase__ ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
361
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = ["model.decoder.embed_positions.weights"] def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]: """simple docstring""" if "emb" in name: __lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' ) if "transformer" in name: __lowerCamelCase = name.replace('transformer' , 'model.decoder' ) if "cross_attention" in name: __lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' ) if "linear1" in name: __lowerCamelCase = name.replace('linear1' , 'fc1' ) if "linear2" in name: __lowerCamelCase = name.replace('linear2' , 'fc2' ) if "norm1" in name: __lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' ) if "norm_cross" in name: __lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' ) if "norm2" in name: __lowerCamelCase = name.replace('norm2' , 'final_layer_norm' ) if "out_norm" in name: __lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' ) if "linears" in name: __lowerCamelCase = name.replace('linears' , 'lm_heads' ) if "condition_provider.conditioners.description.output_proj" in name: __lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' ) return name def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]: """simple docstring""" __lowerCamelCase = list(state_dict.keys() ) __lowerCamelCase = {} for key in keys: __lowerCamelCase = state_dict.pop(UpperCamelCase__ ) __lowerCamelCase = rename_keys(UpperCamelCase__ ) if "in_proj_weight" in key: # split fused qkv proj __lowerCamelCase = val[:hidden_size, :] __lowerCamelCase = val[hidden_size : 2 * hidden_size, :] __lowerCamelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __lowerCamelCase = val else: __lowerCamelCase = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig: """simple docstring""" if checkpoint == "small": # default config values __lowerCamelCase = 1024 __lowerCamelCase = 24 __lowerCamelCase = 16 elif checkpoint == "medium": __lowerCamelCase = 1536 __lowerCamelCase = 48 __lowerCamelCase = 24 elif checkpoint == "large": __lowerCamelCase = 2048 __lowerCamelCase = 48 __lowerCamelCase = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) __lowerCamelCase = MusicgenDecoderConfig( hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , ) return config @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]: """simple docstring""" __lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ ) __lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ ) __lowerCamelCase = fairseq_model.lm.state_dict() __lowerCamelCase , __lowerCamelCase = rename_state_dict( UpperCamelCase__ , 
hidden_size=decoder_config.hidden_size ) __lowerCamelCase = TaEncoderModel.from_pretrained('t5-base' ) __lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' ) __lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) for key in missing_keys.copy(): if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(UpperCamelCase__ ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model __lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ ) # check we can do a forward pass __lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __lowerCamelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits if logits.shape != (8, 1, 2048): raise ValueError('Incorrect shape for logits' ) # now construct the processor __lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' ) __lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) # set the appropriate bos/pad token ids __lowerCamelCase = 2048 __lowerCamelCase = 2048 # set other default generation config params __lowerCamelCase = int(30 * audio_encoder.config.frame_rate ) __lowerCamelCase = True __lowerCamelCase = 3.0 if pytorch_dump_folder is not None: Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(UpperCamelCase__ ) processor.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) __A = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
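# A minimal invocation sketch for the converter above (script name and output path are
# assumptions; the flags come from the argparse block in this file):
#
#   python convert_musicgen_checkpoint.py \
#       --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small-hf \
#       --device cpu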
348
0
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self , lowerCamelCase__ ) -> str: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): __lowerCamelCase = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = 'sshleifer/tiny-gpt2' __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowerCamelCase__ , multi_process=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ ) __lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = 'sgugger/tiny-distilbert-classification' __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , only_pretrain_model=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ ) __lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = 'sshleifer/tiny-gpt2' __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ ) __lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = 'sshleifer/tiny-gpt2' __lowerCamelCase = AutoConfig.from_pretrained(lowerCamelCase__ ) __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowerCamelCase__ , multi_process=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ , [config] ) __lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = 'sshleifer/tiny-gpt2' __lowerCamelCase = AutoConfig.from_pretrained(lowerCamelCase__ ) __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ , [config] ) __lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) 
self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = 'sshleifer/tiny-gpt2' __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ ) __lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = 'sshleifer/tiny-gpt2' __lowerCamelCase = AutoConfig.from_pretrained(lowerCamelCase__ ) __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ , [config] ) __lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = 'patrickvonplaten/t5-tiny-random' __lowerCamelCase = AutoConfig.from_pretrained(lowerCamelCase__ ) __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ , configs=[config] ) __lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' 
) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = 'sshleifer/tiny-gpt2' __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=lowerCamelCase__ , multi_process=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ ) __lowerCamelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=lowerCamelCase__ , save_to_csv=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCamelCase__ , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(lowerCamelCase__ , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(lowerCamelCase__ , 'env.csv' ) , multi_process=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ ) benchmark.run() self.assertTrue(Path(os.path.join(lowerCamelCase__ , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowerCamelCase__ , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowerCamelCase__ , 'env.csv' ) ).exists() ) def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(lowerCamelCase__ ): self.assertTrue(hasattr(lowerCamelCase__ , 'sequential' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'cumulative' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'current' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __lowerCamelCase = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCamelCase__ , 'log.txt' ) , log_print=lowerCamelCase__ , trace_memory_line_by_line=lowerCamelCase__ , eager_mode=lowerCamelCase__ , multi_process=lowerCamelCase__ , ) __lowerCamelCase = TensorFlowBenchmark(lowerCamelCase__ ) __lowerCamelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(lowerCamelCase__ , 'log.txt' ) ).exists() )
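# A minimal sketch of running the benchmark outside the test harness, using only the
# arguments exercised in the tests above (model id and sizes are illustrative):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)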
362
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''sew-d''' def __init__( self , lowerCamelCase__=32 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__=2 , lowerCamelCase__=512 , lowerCamelCase__=256 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=128 , lowerCamelCase__=16 , lowerCamelCase__=True , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ) -> Any: '''simple docstring''' super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ ) __lowerCamelCase = hidden_size __lowerCamelCase = feat_extract_norm __lowerCamelCase = feat_extract_activation __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = conv_bias __lowerCamelCase = num_conv_pos_embeddings __lowerCamelCase = num_conv_pos_embedding_groups __lowerCamelCase = len(self.conv_dim ) __lowerCamelCase = num_hidden_layers __lowerCamelCase = intermediate_size __lowerCamelCase = squeeze_factor __lowerCamelCase = max_position_embeddings __lowerCamelCase = position_buckets __lowerCamelCase = share_att_key __lowerCamelCase = relative_attention __lowerCamelCase = norm_rel_ebd __lowerCamelCase = list(lowerCamelCase__ ) __lowerCamelCase = hidden_act __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = feat_proj_dropout __lowerCamelCase = final_dropout __lowerCamelCase = layer_norm_eps __lowerCamelCase = feature_layer_norm_eps __lowerCamelCase = initializer_range __lowerCamelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks # ctc loss __lowerCamelCase = ctc_loss_reduction __lowerCamelCase = ctc_zero_infinity # sequence classification __lowerCamelCase = use_weighted_layer_sum __lowerCamelCase = classifier_proj_size @property def lowercase_ ( self ) -> Any: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
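# The property above multiplies the conv strides to get the overall waveform-to-frame
# downsampling factor; a worked example with the default strides in this config:
import functools, operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
factor = functools.reduce(operator.mul, conv_stride, 1)
assert factor == 320  # one output frame per 320 input samples at the default strides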
348
0
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging __A = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = ['''pixel_values'''] def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 255 , lowerCamelCase__ = True , lowerCamelCase__ = 8 , **lowerCamelCase__ , ) -> None: '''simple docstring''' super().__init__(**lowerCamelCase__ ) __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_pad __lowerCamelCase = pad_size def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ ) -> np.ndarray: '''simple docstring''' return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = get_image_size(lowerCamelCase__ ) __lowerCamelCase = (old_height // size + 1) * size - old_height __lowerCamelCase = (old_width // size + 1) * size - old_width return pad(lowerCamelCase__ , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ) -> Dict: '''simple docstring''' __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_pad if do_pad is not None else self.do_pad __lowerCamelCase = pad_size if pad_size is not None else self.pad_size __lowerCamelCase = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. __lowerCamelCase = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images] if do_pad: __lowerCamelCase = [self.pad(lowerCamelCase__ , size=lowerCamelCase__ ) for image in images] __lowerCamelCase = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images] __lowerCamelCase = {'pixel_values': images} return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
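# Worked example of the symmetric-pad sizing in `pad` above: each dimension is padded up
# to the next multiple of `size` (note an exact multiple still gains a full extra block):
size = 8
for old in (131, 128):
    pad = (old // size + 1) * size - old
    print(old, "->", old + pad)  # 131 -> 136 (pad 5), 128 -> 136 (pad 8)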
363
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __A = logging.get_logger("transformers.models.speecht5") __A = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __A = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __A = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __A = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __A = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __A = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __A = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __A = { 
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __A = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A = [] __A = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __A = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" for attribute in key.split('.' ): __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ) if weight_type is not None: __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value elif weight_type == "running_mean": __lowerCamelCase = value elif weight_type == "running_var": __lowerCamelCase = value elif weight_type == "num_batches_tracked": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + ('.' 
+ weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any: """simple docstring""" for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = [] if task == "s2t": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2T __lowerCamelCase = IGNORE_KEYS_S2T elif task == "t2s": __lowerCamelCase = None __lowerCamelCase = MAPPING_T2S __lowerCamelCase = IGNORE_KEYS_T2S elif task == "s2s": __lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder __lowerCamelCase = MAPPING_S2S __lowerCamelCase = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase__ , UpperCamelCase__ ): logger.info(F"""{name} was ignored""" ) continue __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , ) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: __lowerCamelCase , __lowerCamelCase = key.split('.*.' ) if prefix in name and suffix in name: __lowerCamelCase = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2] __lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ ) if "weight_g" in name: __lowerCamelCase = 'weight_g' elif "weight_v" in name: __lowerCamelCase = 'weight_v' elif "bias" in name: __lowerCamelCase = 'bias' elif "weight" in name: __lowerCamelCase = 'weight' elif "running_mean" in name: __lowerCamelCase = 'running_mean' elif "running_var" in name: __lowerCamelCase = 'running_var' elif "num_batches_tracked" in name: __lowerCamelCase = 'num_batches_tracked' else: __lowerCamelCase = None set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) continue if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple: """simple docstring""" __lowerCamelCase = full_name.split('conv_layers.' )[-1] __lowerCamelCase = name.split('.' 
) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCamelCase__ ) @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple: """simple docstring""" if config_path is not None: __lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ ) else: __lowerCamelCase = SpeechTaConfig() if task == "s2t": __lowerCamelCase = config.max_text_positions __lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ ) elif task == "t2s": __lowerCamelCase = 1876 __lowerCamelCase = 600 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ ) elif task == "s2s": __lowerCamelCase = 1876 __lowerCamelCase = config.max_speech_positions __lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: __lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it __lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) __lowerCamelCase = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) __lowerCamelCase = SpeechTaFeatureExtractor() __lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) __lowerCamelCase = torch.load(UpperCamelCase__ ) recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if repo_id: print('Pushing to the hub...' 
) processor.push_to_hub(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __A = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
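# A minimal invocation sketch for the converter above (script name and file paths are
# assumptions; the flags come from the argparse block in this file):
#
#   python convert_speecht5_checkpoint.py \
#       --task s2t \
#       --checkpoint_path ./speecht5_asr.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5-s2t-hf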
348
0
def lowerCamelCase_ ( UpperCamelCase__ : list[int] ) -> int:
    """simple docstring"""
    if not numbers:
        return 0
    if not isinstance(UpperCamelCase__ , (list, tuple) ) or not all(
        isinstance(UpperCamelCase__ , UpperCamelCase__ ) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers' )
    __lowerCamelCase = __lowerCamelCase = __lowerCamelCase = numbers[0]
    for i in range(1 , len(UpperCamelCase__ ) ):
        # update the maximum and minimum subarray products
        __lowerCamelCase = numbers[i]
        if number < 0:
            __lowerCamelCase , __lowerCamelCase = min_till_now, max_till_now
        __lowerCamelCase = max(UpperCamelCase__ , max_till_now * number )
        __lowerCamelCase = min(UpperCamelCase__ , min_till_now * number )
        # update the maximum product found till now
        __lowerCamelCase = max(UpperCamelCase__ , UpperCamelCase__ )
    return max_prod
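# Quick sanity values for the max-product-subarray routine above, checked against a
# brute-force reference (the reference function below is for illustration only):
def _brute_force_max_product(nums):
    # O(n^2) scan over all contiguous subarrays
    best = nums[0]
    for i in range(len(nums)):
        prod = 1
        for j in range(i, len(nums)):
            prod *= nums[j]
            best = max(best, prod)
    return best

assert _brute_force_max_product([2, 3, -2, 4]) == 6  # best subarray is [2, 3]
assert _brute_force_max_product([-2, 0, -1]) == 0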
364
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
    """simple docstring"""
    __lowerCamelCase = [False] * len(UpperCamelCase__ )
    __lowerCamelCase = [-1] * len(UpperCamelCase__ )

    def dfs(UpperCamelCase__ : int , UpperCamelCase__ : int ):
        __lowerCamelCase = True
        __lowerCamelCase = c
        for u in graph[v]:
            if not visited[u]:
                dfs(UpperCamelCase__ , 1 - c )

    for i in range(len(UpperCamelCase__ ) ):
        if not visited[i]:
            dfs(UpperCamelCase__ , 0 )

    for i in range(len(UpperCamelCase__ ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
__A = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
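# A counterexample for the checker above: any odd cycle is not two-colorable. A triangle
# in the same adjacency-list convention is expected to yield False (verified by hand:
# DFS colors 0 -> 0, 1 -> 1, 2 -> 0, so edge (0, 2) ends up with two equal colors).
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(lowerCamelCase_(triangle))  # expected: False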
348
0