code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : str = logging.get_logger(__name__) _lowerCAmelCase : Any = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'xmod' def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=False , lowerCamelCase=2 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=("en_XX",) , lowerCamelCase=None , **lowerCamelCase , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase ) snake_case__ : Union[str, Any] = vocab_size snake_case__ : List[str] = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : Union[str, Any] = num_attention_heads snake_case__ : Union[str, Any] = hidden_act snake_case__ : Optional[Any] = intermediate_size snake_case__ : int = hidden_dropout_prob snake_case__ : Tuple = attention_probs_dropout_prob snake_case__ : str = max_position_embeddings snake_case__ : List[str] = type_vocab_size snake_case__ : Dict = initializer_range snake_case__ : Tuple = layer_norm_eps snake_case__ : Dict = position_embedding_type snake_case__ : List[Any] = use_cache snake_case__ : Dict = classifier_dropout snake_case__ : Optional[Any] = pre_norm snake_case__ : Any = adapter_reduction_factor snake_case__ : str = adapter_layer_norm snake_case__ : Dict = adapter_reuse_layer_norm snake_case__ : int = ln_before_adapter snake_case__ : Optional[int] = list(lowerCamelCase ) snake_case__ : Any = default_language class snake_case ( __lowerCamelCase ): """simple docstring""" @property def lowercase__ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": snake_case__ : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: snake_case__ : str = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
694
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 42 class snake_case ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Optional[Any] = sample_size # time if time_embedding_type == "fourier": snake_case__ : Optional[int] = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase ) snake_case__ : List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": snake_case__ : Dict = Timesteps( block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase ) snake_case__ : Dict = block_out_channels[0] if use_timestep_embedding: snake_case__ : Any = block_out_channels[0] * 4 snake_case__ : Optional[Any] = TimestepEmbedding( in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , ) snake_case__ : Dict = nn.ModuleList([] ) snake_case__ : List[Any] = None snake_case__ : Union[str, Any] = nn.ModuleList([] ) snake_case__ : List[str] = None # down snake_case__ : Tuple = in_channels for i, down_block_type in enumerate(lowerCamelCase ): snake_case__ : Tuple = output_channel snake_case__ : List[str] = block_out_channels[i] if i == 0: input_channel += extra_in_channels snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1 snake_case__ : Dict = get_down_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(lowerCamelCase ) # mid snake_case__ : Optional[int] = get_mid_block( lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , ) # up snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) ) snake_case__ : Any = reversed_block_out_channels[0] if out_block_type is None: snake_case__ : List[Any] = out_channels else: snake_case__ : Dict = block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase ): snake_case__ : List[str] = output_channel snake_case__ : List[str] = ( reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels ) snake_case__ : List[str] = i == len(lowerCamelCase ) - 1 snake_case__ : str = get_up_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(lowerCamelCase ) snake_case__ : Optional[Any] = output_channel # out snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) snake_case__ : Union[str, Any] = get_out_block( out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]: """simple docstring""" snake_case__ : str = timestep if not torch.is_tensor(lowerCamelCase ): snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0: snake_case__ : Optional[Any] = timesteps[None].to(sample.device ) snake_case__ : Any = self.time_proj(lowerCamelCase ) if self.config.use_timestep_embedding: snake_case__ : Tuple = self.time_mlp(lowerCamelCase ) else: snake_case__ : Union[str, Any] = timestep_embed[..., None] snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down snake_case__ : List[Any] = () for downsample_block in self.down_blocks: snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase ) down_block_res_samples += res_samples # 3. mid if self.mid_block: snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): snake_case__ : str = down_block_res_samples[-1:] snake_case__ : int = down_block_res_samples[:-1] snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase ) # 5. post-process if self.out_block: snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase ) if not return_dict: return (sample,) return UNetaDOutput(sample=lowerCamelCase )
694
1
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch _lowerCAmelCase : Optional[Any] = random.Random() def _A ( snake_case__ : Tuple , snake_case__ : int=1.0 , snake_case__ : str=None , snake_case__ : Dict=None ): if rng is None: snake_case__ : Union[str, Any] = global_rng snake_case__ : str = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=10 , lowerCamelCase=160 , lowerCamelCase=8 , lowerCamelCase=0.0 , lowerCamelCase=4000 , lowerCamelCase=False , lowerCamelCase=True , ) -> Dict: """simple docstring""" snake_case__ : Tuple = parent snake_case__ : List[str] = batch_size snake_case__ : Union[str, Any] = min_seq_length snake_case__ : Optional[int] = max_seq_length snake_case__ : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) snake_case__ : str = padding_value snake_case__ : int = sampling_rate snake_case__ : int = return_attention_mask snake_case__ : Union[str, Any] = do_normalize snake_case__ : int = feature_size snake_case__ : int = chunk_length snake_case__ : Tuple = hop_length def lowercase__ ( self ) -> Tuple: """simple docstring""" return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase__ ( self , lowerCamelCase=False , lowerCamelCase=False ) -> Union[str, Any]: """simple docstring""" def _flatten(lowerCamelCase ): return list(itertools.chain(*lowerCamelCase ) ) if equal_length: snake_case__ : int = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size snake_case__ : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: snake_case__ : int = [np.asarray(lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase = WhisperFeatureExtractor if is_speech_available() else None def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : List[Any] = WhisperFeatureExtractionTester(self ) def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ : List[Any] = feat_extract_first.save_pretrained(lowerCamelCase )[0] check_json_file_has_correct_format(lowerCamelCase ) snake_case__ : Tuple = self.feature_extraction_class.from_pretrained(lowerCamelCase ) snake_case__ : Union[str, Any] = feat_extract_first.to_dict() snake_case__ : str = feat_extract_second.to_dict() snake_case__ : Union[str, Any] = feat_extract_first.mel_filters snake_case__ : Any = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) ) self.assertEqual(lowerCamelCase , lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ : Optional[Any] = os.path.join(lowerCamelCase , '''feat_extract.json''' ) feat_extract_first.to_json_file(lowerCamelCase ) snake_case__ : Tuple = self.feature_extraction_class.from_json_file(lowerCamelCase ) snake_case__ : int = feat_extract_first.to_dict() snake_case__ : Dict = feat_extract_second.to_dict() snake_case__ : Any = feat_extract_first.mel_filters snake_case__ : int = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) ) self.assertEqual(lowerCamelCase , lowerCamelCase ) def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 snake_case__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] snake_case__ : Dict = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] # Test feature size snake_case__ : Tuple = feature_extractor(lowerCamelCase , padding='''max_length''' , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input snake_case__ : int = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features snake_case__ : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test batched snake_case__ : Any = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features snake_case__ : Tuple = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. snake_case__ : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)] snake_case__ : Union[str, Any] = np.asarray(lowerCamelCase ) snake_case__ : Any = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features snake_case__ : Dict = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test truncation required snake_case__ : str = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] snake_case__ : List[str] = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] snake_case__ : Dict = [x[: feature_extractor.n_samples] for x in speech_inputs] snake_case__ : Any = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs_truncated] snake_case__ : Union[str, Any] = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features snake_case__ : Union[str, Any] = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) def lowercase__ ( self ) -> List[str]: """simple docstring""" import torch snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case__ : Optional[Any] = np.random.rand(100 , 32 ).astype(np.floataa ) snake_case__ : List[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: snake_case__ : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) snake_case__ : List[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech snake_case__ : List[Any] = ds.sort('''id''' ).select(range(lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Dict = torch.tensor( [ 0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951, 0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678, 0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554, -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854 ] ) # fmt: on snake_case__ : Optional[int] = self._load_datasamples(1 ) snake_case__ : List[Any] = WhisperFeatureExtractor() snake_case__ : Tuple = feature_extractor(lowerCamelCase , return_tensors='''pt''' ).input_features self.assertEqual(input_features.shape , (1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase , atol=1E-4 ) ) def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case__ : List[str] = self._load_datasamples(1 )[0] snake_case__ : List[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue snake_case__ : int = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase )[0] self.assertTrue(np.all(np.mean(lowerCamelCase ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCamelCase ) - 1 ) < 1E-3 ) )
694
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!" def _A ( snake_case__ : str , snake_case__ : str ): snake_case__ : Tuple = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 10_24, '''hidden_size''': 7_68, '''max_length''': 5_12, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 10_24, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1E-5, '''token_type_vocab_size''': 2, } snake_case__ : List[str] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py snake_case__ : str = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later snake_case__ : Any = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab snake_case__ : Any = os.path.join(get_home_dir() , '''models''' ) snake_case__ : List[Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ ) snake_case__ : Optional[int] = nlp.model.BERTModel( snake_case__ , len(snake_case__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case__ , use_decoder=snake_case__ , ) original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ ) snake_case__ : Any = original_bort._collect_params_with_prefix() # Build our config 🤗 snake_case__ : Union[str, Any] = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(snake_case__ ), } snake_case__ : Dict = BertConfig.from_dict(snake_case__ ) snake_case__ : Dict = BertForMaskedLM(snake_case__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(snake_case__ : str ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(snake_case__ : List[Any] , snake_case__ : Any ): snake_case__ : Union[str, Any] = hf_param.shape snake_case__ : Any = to_torch(params[gluon_param] ) snake_case__ : Dict = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) snake_case__ : int = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) snake_case__ : str = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): snake_case__ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention snake_case__ : BertSelfAttention = layer.attention.self snake_case__ : Optional[Any] = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) snake_case__ : Dict = check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) snake_case__ : List[str] = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) snake_case__ : int = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output snake_case__ : BertSelfOutput = layer.attention.output snake_case__ : Optional[Any] = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) snake_case__ : List[str] = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) snake_case__ : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) snake_case__ : Any = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate snake_case__ : BertIntermediate = layer.intermediate snake_case__ : int = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) snake_case__ : Optional[int] = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output snake_case__ : BertOutput = layer.output snake_case__ : Any = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) snake_case__ : Tuple = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) snake_case__ : Tuple = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models snake_case__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' ) snake_case__ : str = tokenizer.encode_plus(snake_case__ )['''input_ids'''] # Get gluon output snake_case__ : List[str] = mx.nd.array([input_ids] ) snake_case__ : Optional[int] = original_bort(inputs=snake_case__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(snake_case__ ) snake_case__ : Optional[Any] = BertModel.from_pretrained(snake_case__ ) hf_bort_model.eval() snake_case__ : Optional[Any] = tokenizer.encode_plus(snake_case__ , return_tensors='''pt''' ) snake_case__ : str = hf_bort_model(**snake_case__ )[0] snake_case__ : str = output_gluon[0].asnumpy() snake_case__ : str = output_hf[0].detach().numpy() snake_case__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item() snake_case__ : Optional[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , snake_case__ ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
694
1
'''simple docstring''' from collections import defaultdict from math import ceil, sqrt def _A ( snake_case__ : int = 1_00_00_00 , snake_case__ : int = 10 ): snake_case__ : defaultdict = defaultdict(snake_case__ ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: snake_case__ : Optional[Any] = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: snake_case__ : Optional[int] = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(snake_case__ , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(F'''{solution() = }''')
694
'''simple docstring''' def _A ( snake_case__ : int = 4_00_00_00 ): snake_case__ : int = [] snake_case__ ,snake_case__ : Union[str, Any] = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(snake_case__ ) snake_case__ ,snake_case__ : Any = b, a + b return sum(snake_case__ ) if __name__ == "__main__": print(F'''{solution() = }''')
694
1
'''simple docstring''' import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class snake_case ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase = 128 , lowerCamelCase = 256 , lowerCamelCase = 2_000.0 , lowerCamelCase = 768 , lowerCamelCase = 12 , lowerCamelCase = 12 , lowerCamelCase = 64 , lowerCamelCase = 2048 , lowerCamelCase = 0.1 , ) -> Any: """simple docstring""" super().__init__() snake_case__ : Tuple = nn.Sequential( nn.Linear(lowerCamelCase , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , ) snake_case__ : Any = nn.Embedding(lowerCamelCase , lowerCamelCase ) snake_case__ : Any = False snake_case__ : Any = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) snake_case__ : Dict = nn.Dropout(p=lowerCamelCase ) snake_case__ : Union[str, Any] = nn.ModuleList() for lyr_num in range(lowerCamelCase ): # FiLM conditional T5 decoder snake_case__ : Union[str, Any] = DecoderLayer(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase ) self.decoders.append(lowerCamelCase ) snake_case__ : List[Any] = TaLayerNorm(lowerCamelCase ) snake_case__ : Union[str, Any] = nn.Dropout(p=lowerCamelCase ) snake_case__ : List[str] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Any: """simple docstring""" snake_case__ : Any = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str: """simple docstring""" snake_case__ ,snake_case__ ,snake_case__ : int = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. snake_case__ : Dict = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) snake_case__ : str = self.conditioning_emb(lowerCamelCase ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) snake_case__ : Optional[Any] = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. snake_case__ : Tuple = torch.broadcast_to( torch.arange(lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , ) snake_case__ : Union[str, Any] = self.position_encoding(lowerCamelCase ) snake_case__ : Union[str, Any] = self.continuous_inputs_projection(lowerCamelCase ) inputs += position_encodings snake_case__ : Dict = self.dropout(lowerCamelCase ) # decoder: No padding present. snake_case__ : Optional[int] = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. snake_case__ : Tuple = [(x, self.encoder_decoder_mask(lowerCamelCase , lowerCamelCase )) for x, y in encodings_and_masks] # cross attend style: concat encodings snake_case__ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) snake_case__ : Optional[Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: snake_case__ : List[Any] = lyr( lowerCamelCase , conditioning_emb=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )[0] snake_case__ : Tuple = self.decoder_norm(lowerCamelCase ) snake_case__ : Dict = self.post_dropout(lowerCamelCase ) snake_case__ : List[str] = self.spec_out(lowerCamelCase ) return spec_out class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=1E-6 ) -> List[Any]: """simple docstring""" super().__init__() snake_case__ : List[Any] = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase ) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]: """simple docstring""" snake_case__ : List[Any] = self.layer[0]( lowerCamelCase , conditioning_emb=lowerCamelCase , attention_mask=lowerCamelCase , ) if encoder_hidden_states is not None: snake_case__ : List[Any] = torch.where(encoder_attention_mask > 0 , 0 , -1E1_0 ).to( encoder_hidden_states.dtype ) snake_case__ : int = self.layer[1]( lowerCamelCase , key_value_states=lowerCamelCase , attention_mask=lowerCamelCase , ) # Apply Film Conditional Feed Forward layer snake_case__ : Union[str, Any] = self.layer[-1](lowerCamelCase , lowerCamelCase ) return (hidden_states,) class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str: """simple docstring""" super().__init__() snake_case__ : List[Any] = TaLayerNorm(lowerCamelCase ) snake_case__ : Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase ) snake_case__ : Tuple = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase ) snake_case__ : Optional[int] = nn.Dropout(lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> int: """simple docstring""" snake_case__ : Union[str, Any] = self.layer_norm(lowerCamelCase ) if conditioning_emb is not None: snake_case__ : Optional[Any] = self.FiLMLayer(lowerCamelCase , lowerCamelCase ) # Self-attention block snake_case__ : List[str] = self.attention(lowerCamelCase ) snake_case__ : str = hidden_states + self.dropout(lowerCamelCase ) return hidden_states class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" super().__init__() snake_case__ : Optional[int] = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase ) snake_case__ : Optional[Any] = TaLayerNorm(lowerCamelCase , eps=lowerCamelCase ) snake_case__ : Union[str, Any] = nn.Dropout(lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]: """simple docstring""" snake_case__ : Optional[Any] = self.layer_norm(lowerCamelCase ) snake_case__ : str = self.attention( lowerCamelCase , encoder_hidden_states=lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , ) snake_case__ : Union[str, Any] = hidden_states + self.dropout(lowerCamelCase ) return layer_output class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int: """simple docstring""" super().__init__() snake_case__ : int = TaDenseGatedActDense(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase ) snake_case__ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase ) snake_case__ : List[str] = TaLayerNorm(lowerCamelCase , eps=lowerCamelCase ) snake_case__ : List[Any] = nn.Dropout(lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[Any]: """simple docstring""" snake_case__ : Optional[Any] = self.layer_norm(lowerCamelCase ) if conditioning_emb is not None: snake_case__ : Union[str, Any] = self.film(lowerCamelCase , lowerCamelCase ) snake_case__ : Tuple = self.DenseReluDense(lowerCamelCase ) snake_case__ : List[Any] = hidden_states + self.dropout(lowerCamelCase ) return hidden_states class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]: """simple docstring""" super().__init__() snake_case__ : int = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) snake_case__ : Optional[int] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) snake_case__ : List[str] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) snake_case__ : Tuple = nn.Dropout(lowerCamelCase ) snake_case__ : Any = NewGELUActivation() def lowercase__ ( self , lowerCamelCase ) -> Optional[Any]: """simple docstring""" snake_case__ : Any = self.act(self.wi_a(lowerCamelCase ) ) snake_case__ : str = self.wi_a(lowerCamelCase ) snake_case__ : Dict = hidden_gelu * hidden_linear snake_case__ : Dict = self.dropout(lowerCamelCase ) snake_case__ : List[str] = self.wo(lowerCamelCase ) return hidden_states class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=1E-6 ) -> Optional[Any]: """simple docstring""" super().__init__() snake_case__ : int = nn.Parameter(torch.ones(lowerCamelCase ) ) snake_case__ : List[Any] = eps def lowercase__ ( self , lowerCamelCase ) -> Any: """simple docstring""" snake_case__ : Optional[Any] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCamelCase ) snake_case__ : str = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: snake_case__ : int = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class snake_case ( nn.Module ): """simple docstring""" def lowercase__ ( self , lowerCamelCase ) -> torch.Tensor: """simple docstring""" return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(lowerCamelCase , 3.0 )) )) class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" super().__init__() snake_case__ : Dict = nn.Linear(lowerCamelCase , out_features * 2 , bias=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ : Union[str, Any] = self.scale_bias(lowerCamelCase ) snake_case__ ,snake_case__ : List[str] = torch.chunk(lowerCamelCase , 2 , -1 ) snake_case__ : Union[str, Any] = x * (1 + scale) + shift return x
694
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: _lowerCAmelCase : Any = None _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = "▁" _lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : int = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}, "tokenizer_file": { "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json" }, } _lowerCAmelCase : Optional[int] = { "google/pegasus-xsum": 5_1_2, } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = PegasusTokenizer _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]: """simple docstring""" snake_case__ : Tuple = offset if additional_special_tokens is not None: if not isinstance(lowerCamelCase , lowerCamelCase ): raise TypeError( f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is''' f''' {type(lowerCamelCase )}''' ) snake_case__ : List[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 ) ] if len(set(lowerCamelCase ) ) != len(lowerCamelCase ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' ) snake_case__ : List[Any] = additional_special_tokens_extended else: snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Union[str, Any] = vocab_file snake_case__ : List[Any] = False if not self.vocab_file else True def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( '''There should be 3 special tokens: mask_token, pad_token, and eos_token +''' f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(lowerCamelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCamelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : int = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ): copyfile(self.vocab_file , lowerCamelCase ) return (out_vocab_file,)
694
1
'''simple docstring''' def _A ( snake_case__ : int = 50 ): snake_case__ : Optional[int] = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F'''{solution() = }''')
694
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple: """simple docstring""" snake_case__ : Optional[Any] = 1.0 if scale is None else scale snake_case__ : Dict = 0.0 if loc is None else loc super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] ) @property def lowercase__ ( self ) -> Dict: """simple docstring""" return self.base_dist.mean * self.scale + self.loc @property def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" return self.base_dist.variance * self.scale**2 @property def lowercase__ ( self ) -> List[str]: """simple docstring""" return self.variance.sqrt() class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None: """simple docstring""" super().__init__(**lowerCamelCase ) snake_case__ : Tuple = args_dim snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] ) snake_case__ : Optional[int] = domain_map def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]: """simple docstring""" snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj] return self.domain_map(*lowerCamelCase ) class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Tuple = function def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]: """simple docstring""" return self.function(lowerCamelCase , *lowerCamelCase ) class snake_case : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 def __init__( self , lowerCamelCase = 1 ) -> None: """simple docstring""" snake_case__ : Optional[Any] = dim snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim} def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" if self.dim == 1: return self.distribution_class(*lowerCamelCase ) else: return Independent(self.distribution_class(*lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution: """simple docstring""" snake_case__ : List[Any] = self._base_distribution(lowerCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim ) @property def lowercase__ ( self ) -> Tuple: """simple docstring""" return () if self.dim == 1 else (self.dim,) @property def lowercase__ ( self ) -> int: """simple docstring""" return len(self.event_shape ) @property def lowercase__ ( self ) -> float: """simple docstring""" return 0.0 def lowercase__ ( self , lowerCamelCase ) -> nn.Module: """simple docstring""" return ParameterProjection( in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowercase__ ( self , *lowerCamelCase ) -> Any: """simple docstring""" raise NotImplementedError() @staticmethod def lowercase__ ( lowerCamelCase ) -> torch.Tensor: """simple docstring""" return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0 class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1} _lowerCAmelCase = StudentT @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"loc": 1, "scale": 1} _lowerCAmelCase = Normal @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"total_count": 1, "logits": 1} _lowerCAmelCase = NegativeBinomial @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowercase__ ( self , lowerCamelCase ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : str = distr_args if self.dim == 1: return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) else: return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : Optional[Any] = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
1
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'mctct' def __init__( self , lowerCamelCase=8065 , lowerCamelCase=1536 , lowerCamelCase=36 , lowerCamelCase=6144 , lowerCamelCase=4 , lowerCamelCase=384 , lowerCamelCase=920 , lowerCamelCase=1E-5 , lowerCamelCase=0.3 , lowerCamelCase="relu" , lowerCamelCase=0.02 , lowerCamelCase=0.3 , lowerCamelCase=0.3 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=0.3 , lowerCamelCase=1 , lowerCamelCase=(7,) , lowerCamelCase=(3,) , lowerCamelCase=80 , lowerCamelCase=1 , lowerCamelCase=None , lowerCamelCase="sum" , lowerCamelCase=False , **lowerCamelCase , ) -> Optional[int]: """simple docstring""" super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase ) snake_case__ : Optional[Any] = vocab_size snake_case__ : int = hidden_size snake_case__ : Any = num_hidden_layers snake_case__ : Union[str, Any] = intermediate_size snake_case__ : Optional[Any] = num_attention_heads snake_case__ : Dict = attention_head_dim snake_case__ : Tuple = max_position_embeddings snake_case__ : List[Any] = layer_norm_eps snake_case__ : int = layerdrop snake_case__ : Optional[int] = hidden_act snake_case__ : List[str] = initializer_range snake_case__ : str = hidden_dropout_prob snake_case__ : List[str] = attention_probs_dropout_prob snake_case__ : Optional[int] = pad_token_id snake_case__ : List[Any] = bos_token_id snake_case__ : str = eos_token_id snake_case__ : Dict = conv_glu_dim snake_case__ : str = conv_dropout snake_case__ : List[str] = num_conv_layers snake_case__ : str = input_feat_per_channel snake_case__ : int = input_channels snake_case__ : Union[str, Any] = conv_channels snake_case__ : Any = ctc_loss_reduction snake_case__ : int = ctc_zero_infinity # prevents config testing fail with exporting to json snake_case__ : List[Any] = list(lowerCamelCase ) snake_case__ : Any = list(lowerCamelCase ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` ''' f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
694
'''simple docstring''' from math import factorial def _A ( snake_case__ : int = 20 ): snake_case__ : int = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... snake_case__ : Union[str, Any] = n // 2 return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(2_0)) else: try: _lowerCAmelCase : Any = int(sys.argv[1]) print(solution(n)) except ValueError: print("Invalid entry - please enter a number.")
694
1
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": _lowerCAmelCase : int = input("Enter image url: ").strip() print(F'''Downloading image from {url} ...''') _lowerCAmelCase : List[str] = BeautifulSoup(requests.get(url).content, "html.parser") # The image URL is in the content field of the first meta tag with property og:image _lowerCAmelCase : Any = soup.find("meta", {"property": "og:image"})["content"] _lowerCAmelCase : str = requests.get(image_url).content _lowerCAmelCase : str = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg''' with open(file_name, "wb") as fp: fp.write(image_data) print(F'''Done. Image saved to disk as {file_name}.''')
694
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = (EulerDiscreteScheduler,) _lowerCAmelCase = 1_0 def lowercase__ ( self , **lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ : Any = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowerCamelCase ) return config def lowercase__ ( self ) -> List[Any]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCamelCase ) def lowercase__ ( self ) -> str: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Any = self.get_scheduler_config() snake_case__ : int = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ : Dict = torch.manual_seed(0 ) snake_case__ : Any = self.dummy_model() snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ : List[Any] = sample.to(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : int = model(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Optional[int] = output.prev_sample snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : Tuple = self.scheduler_classes[0] snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) snake_case__ : int = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ : Optional[Any] = torch.manual_seed(0 ) snake_case__ : Optional[int] = self.dummy_model() snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ : Optional[int] = sample.to(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Union[str, Any] = output.prev_sample snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 0.0_002 ) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3 def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Optional[int] = self.get_scheduler_config() snake_case__ : List[str] = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase ) snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Optional[int] = self.dummy_model() snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ : Tuple = sample.to(lowerCamelCase ) for t in scheduler.timesteps: snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : str = model(lowerCamelCase , lowerCamelCase ) snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : int = output.prev_sample snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : Dict = self.scheduler_classes[0] snake_case__ : str = self.get_scheduler_config() snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase ) snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Dict = self.dummy_model() snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ : Optional[Any] = sample.to(lowerCamelCase ) for t in scheduler.timesteps: snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Optional[int] = output.prev_sample snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
694
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 42 class snake_case ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Optional[Any] = sample_size # time if time_embedding_type == "fourier": snake_case__ : Optional[int] = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase ) snake_case__ : List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": snake_case__ : Dict = Timesteps( block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase ) snake_case__ : Dict = block_out_channels[0] if use_timestep_embedding: snake_case__ : Any = block_out_channels[0] * 4 snake_case__ : Optional[Any] = TimestepEmbedding( in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , ) snake_case__ : Dict = nn.ModuleList([] ) snake_case__ : List[Any] = None snake_case__ : Union[str, Any] = nn.ModuleList([] ) snake_case__ : List[str] = None # down snake_case__ : Tuple = in_channels for i, down_block_type in enumerate(lowerCamelCase ): snake_case__ : Tuple = output_channel snake_case__ : List[str] = block_out_channels[i] if i == 0: input_channel += extra_in_channels snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1 snake_case__ : Dict = get_down_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(lowerCamelCase ) # mid snake_case__ : Optional[int] = get_mid_block( lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , ) # up snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) ) snake_case__ : Any = reversed_block_out_channels[0] if out_block_type is None: snake_case__ : List[Any] = out_channels else: snake_case__ : Dict = block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase ): snake_case__ : List[str] = output_channel snake_case__ : List[str] = ( reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels ) snake_case__ : List[str] = i == len(lowerCamelCase ) - 1 snake_case__ : str = get_up_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(lowerCamelCase ) snake_case__ : Optional[Any] = output_channel # out snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) snake_case__ : Union[str, Any] = get_out_block( out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]: """simple docstring""" snake_case__ : str = timestep if not torch.is_tensor(lowerCamelCase ): snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0: snake_case__ : Optional[Any] = timesteps[None].to(sample.device ) snake_case__ : Any = self.time_proj(lowerCamelCase ) if self.config.use_timestep_embedding: snake_case__ : Tuple = self.time_mlp(lowerCamelCase ) else: snake_case__ : Union[str, Any] = timestep_embed[..., None] snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down snake_case__ : List[Any] = () for downsample_block in self.down_blocks: snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase ) down_block_res_samples += res_samples # 3. mid if self.mid_block: snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): snake_case__ : str = down_block_res_samples[-1:] snake_case__ : int = down_block_res_samples[:-1] snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase ) # 5. post-process if self.out_block: snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase ) if not return_dict: return (sample,) return UNetaDOutput(sample=lowerCamelCase )
694
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = ['pixel_values'] def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : int = do_resize snake_case__ : Dict = do_rescale snake_case__ : Any = size_divisor snake_case__ : str = resample super().__init__(**lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase ) # Rounds the height and width down to the closest multiple of size_divisor snake_case__ : Any = height // size_divisor * size_divisor snake_case__ : Union[str, Any] = width // size_divisor * size_divisor snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) return image def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature: """simple docstring""" snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor snake_case__ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase ) if not valid_images(lowerCamelCase ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images] if do_resize: snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images] if do_rescale: snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images] snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images] snake_case__ : str = {'''pixel_values''': images} return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
694
1
'''simple docstring''' import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument _lowerCAmelCase : Optional[Any] = { "/attention/": "/0/SelfAttention/", "/self_attention/": "/0/SelfAttention/", "/encoder_decoder_attention/": "/1/EncDecAttention/", "value": "v", "query": "q", "key": "k", "out": "o", "pre_self_attention_layer_norm": "0/layer_norm", "pre_cross_attention_layer_norm": "1/layer_norm", "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong "token_embedder": "shared", "encoder_norm": "final_layer_norm", "decoder_norm": "final_layer_norm", "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight", "router/router_weights/w/": "router/classifier/", "roer/roer_weights/w/": "router/classifier/", "logits_dense": "lm_head", } def _A ( snake_case__ : Union[str, Any] ): # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model snake_case__ : Any = list(s_dict.keys() ) for key in keys: snake_case__ : Union[str, Any] = R'''.*/layers_(\d+)''' snake_case__ : Union[str, Any] = key if re.match(snake_case__ , snake_case__ ): snake_case__ : str = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , snake_case__ ) snake_case__ : Dict = R'''(encoder|decoder)\/''' if re.match(snake_case__ , snake_case__ ): snake_case__ : List[str] = re.match(snake_case__ , snake_case__ ).groups() if groups[0] == "encoder": snake_case__ : Optional[int] = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , snake_case__ ) snake_case__ : List[Any] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , snake_case__ ) elif groups[0] == "decoder": snake_case__ : Optional[int] = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , snake_case__ ) snake_case__ : Optional[int] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , snake_case__ ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: snake_case__ : Dict = new_key.replace(snake_case__ , snake_case__ ) print(f'''{key} -> {new_key}''' ) snake_case__ : Tuple = s_dict.pop(snake_case__ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: snake_case__ : List[str] = s_dict[ '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: snake_case__ : Any = s_dict[ '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T # 3. Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: snake_case__ : int = s_dict[key].shape[0] snake_case__ : List[str] = s_dict[key] for idx in range(snake_case__ ): snake_case__ : Optional[Any] = expert_weihts[idx] print(f'''{key} -> {key.replace('expert/' , 'nested fstring' )}''' ) s_dict.pop(snake_case__ ) return s_dict _lowerCAmelCase : int = { "NUM_ENCODER_LAYERS": "num_layers", "NUM_DECODER_LAYERS": "num_decoder_layers", "NUM_HEADS": "num_heads", "HEAD_DIM": "d_kv", "EMBED_DIM": "d_model", "MLP_DIM": "d_ff", "NUM_SELECTED_EXPERTS": "num_selected_experts", "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers", "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers", "dense.MlpBlock.activations": "feed_forward_proj", } def _A ( snake_case__ : List[str] , snake_case__ : int ): # Convert a google style config to the hugging face fromat import regex as re with open(snake_case__ , '''r''' ) as f: snake_case__ : str = f.read() snake_case__ : Optional[Any] = re.findall(R'''(.*) = ([0-9.]*)''' , snake_case__ ) snake_case__ : Optional[Any] = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": snake_case__ : List[Any] = float(snake_case__ ) if '''.''' in value else int(snake_case__ ) snake_case__ : Any = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , snake_case__ )[0] snake_case__ : Tuple = str(activation[1] ) snake_case__ : Any = num_experts snake_case__ : Any = SwitchTransformersConfig(**snake_case__ ) return config def _A ( snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any]=None , snake_case__ : Tuple="./" , snake_case__ : Tuple=8 ): # Initialise PyTorch model print(f'''Loading flax weights from : {flax_checkpoint_path}''' ) snake_case__ : List[str] = checkpoints.load_tax_checkpoint(snake_case__ ) if gin_file is not None: snake_case__ : Any = convert_gin_to_config(snake_case__ , snake_case__ ) else: snake_case__ : Tuple = SwitchTransformersConfig.from_pretrained(snake_case__ ) snake_case__ : str = SwitchTransformersForConditionalGeneration(snake_case__ ) snake_case__ : str = flax_params['''target'''] snake_case__ : Optional[Any] = flatten_dict(snake_case__ , sep='''/''' ) snake_case__ : str = rename_keys(snake_case__ ) snake_case__ : str = unflatten_dict(snake_case__ , sep='''/''' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) pt_model.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the" " model architecture. If not provided, a `gin_file` has to be provided." ), ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model." ) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") _lowerCAmelCase : str = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
694
'''simple docstring''' from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] ) @pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] ) @pytest.mark.parametrize('''revision''' , [None, '''v2'''] ) def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ): snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ ) assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
694
1
'''simple docstring''' def _A ( snake_case__ : float , snake_case__ : float ): return price * (1 + tax_rate) if __name__ == "__main__": print(F'''{price_plus_tax(1_0_0, 0.25) = }''') print(F'''{price_plus_tax(1_25.50, 0.05) = }''')
694
'''simple docstring''' from __future__ import annotations from collections import namedtuple def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
694
1
'''simple docstring''' from functools import lru_cache @lru_cache def _A ( snake_case__ : int ): if num < 0: raise ValueError('''Number should not be negative.''' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' import os import pytest from transformers.dynamic_module_utils import get_imports _lowerCAmelCase : Union[str, Any] = "\nimport os\n" _lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n" _lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n" _lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n" _lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n" _lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n" _lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n" _lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n" _lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n" _lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n" _lowerCAmelCase : Tuple = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , snake_case__ ) def _A ( snake_case__ : List[str] , snake_case__ : Dict ): snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' ) with open(snake_case__ , '''w''' ) as _tmp_file: _tmp_file.write(snake_case__ ) snake_case__ : int = get_imports(snake_case__ ) assert parsed_imports == ["os"]
694
1
'''simple docstring''' class snake_case : """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : Any = name snake_case__ : Any = value snake_case__ : Union[str, Any] = weight def __repr__( self ) -> Dict: """simple docstring""" return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def lowercase__ ( self ) -> Any: """simple docstring""" return self.value def lowercase__ ( self ) -> Tuple: """simple docstring""" return self.name def lowercase__ ( self ) -> List[str]: """simple docstring""" return self.weight def lowercase__ ( self ) -> Dict: """simple docstring""" return self.value / self.weight def _A ( snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ): snake_case__ : int = [] for i in range(len(snake_case__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def _A ( snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : str ): snake_case__ : Dict = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ ) snake_case__ : Tuple = [] snake_case__ ,snake_case__ : Optional[Any] = 0.0, 0.0 for i in range(len(snake_case__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def _A ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Any = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'markuplm' def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str: """simple docstring""" super().__init__( pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Optional[int] = vocab_size snake_case__ : Tuple = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : List[str] = num_attention_heads snake_case__ : List[Any] = hidden_act snake_case__ : Dict = intermediate_size snake_case__ : List[str] = hidden_dropout_prob snake_case__ : Optional[int] = attention_probs_dropout_prob snake_case__ : str = max_position_embeddings snake_case__ : str = type_vocab_size snake_case__ : List[str] = initializer_range snake_case__ : List[str] = layer_norm_eps snake_case__ : Optional[Any] = position_embedding_type snake_case__ : Dict = use_cache snake_case__ : int = classifier_dropout # additional properties snake_case__ : Union[str, Any] = max_depth snake_case__ : Dict = max_xpath_tag_unit_embeddings snake_case__ : Any = max_xpath_subs_unit_embeddings snake_case__ : int = tag_pad_id snake_case__ : Tuple = subs_pad_id snake_case__ : Dict = xpath_unit_hidden_size
694
1
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 ) if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' def _A ( snake_case__ : float ): return 10 - x * x def _A ( snake_case__ : float , snake_case__ : float ): # Bolzano theory in order to find if there is a root between a and b if equation(snake_case__ ) * equation(snake_case__ ) >= 0: raise ValueError('''Wrong space!''' ) snake_case__ : List[str] = a while (b - a) >= 0.01: # Find middle point snake_case__ : Optional[int] = (a + b) / 2 # Check if middle point is root if equation(snake_case__ ) == 0.0: break # Decide the side to repeat the steps if equation(snake_case__ ) * equation(snake_case__ ) < 0: snake_case__ : Dict = c else: snake_case__ : List[str] = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
694
1
'''simple docstring''' import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class snake_case ( unittest.TestCase ): """simple docstring""" @slow def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : Optional[int] = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) snake_case__ : str = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) model.to(lowerCamelCase ) from datasets import load_dataset snake_case__ : int = load_dataset('''nielsr/rvlcdip-demo''' ) snake_case__ : List[str] = dataset['''train'''][0]['''image'''].convert('''RGB''' ) snake_case__ : Union[str, Any] = image_processor(lowerCamelCase , return_tensors='''pt''' ).to(lowerCamelCase ) # forward pass with torch.no_grad(): snake_case__ : List[str] = model(**lowerCamelCase ) snake_case__ : List[Any] = outputs.logits snake_case__ : Tuple = torch.Size((1, 16) ) self.assertEqual(logits.shape , lowerCamelCase ) snake_case__ : List[str] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=lowerCamelCase , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
694
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : list[float] , snake_case__ : list[float] ): snake_case__ : Dict = sorted(numsa + numsa ) snake_case__ ,snake_case__ : Tuple = divmod(len(snake_case__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()] _lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()] print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
694
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = {"vocab_file": "sentencepiece.bpe.model"} _lowerCAmelCase : str = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", } } _lowerCAmelCase : int = { "camembert-base": 5_1_2, } _lowerCAmelCase : str = "▁" class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=["<s>NOTUSED", "</s>NOTUSED"] , lowerCamelCase = None , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : List[str] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token snake_case__ : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , additional_special_tokens=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , ) snake_case__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase ) ) snake_case__ : Tuple = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> snake_case__ : Optional[int] = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3} snake_case__ : Union[str, Any] = len(self.fairseq_tokens_to_ids ) snake_case__ : Tuple = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) snake_case__ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : int = [self.cls_token_id] snake_case__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" snake_case__ : Dict = [self.sep_token_id] snake_case__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self ) -> Tuple: """simple docstring""" return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : List[str] = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(lowerCamelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(lowerCamelCase ) def lowercase__ ( self , lowerCamelCase ) -> Dict: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : Any = [] snake_case__ : List[str] = '''''' snake_case__ : List[str] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase ) + token snake_case__ : Any = True snake_case__ : str = [] else: current_sub_tokens.append(lowerCamelCase ) snake_case__ : List[str] = False out_string += self.sp_model.decode(lowerCamelCase ) return out_string.strip() def __getstate__( self ) -> str: """simple docstring""" snake_case__ : int = self.__dict__.copy() snake_case__ : Optional[int] = None return state def __setstate__( self , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case__ : Optional[Any] = {} snake_case__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : Optional[int] = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase , '''wb''' ) as fi: snake_case__ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase ) return (out_vocab_file,)
694
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = [ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys _lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
694
1
'''simple docstring''' from __future__ import annotations import math _lowerCAmelCase : List[str] = "2020.9.26" _lowerCAmelCase : Any = "xcodz-dot, cclaus, dhruvmanila" def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ): if not all(isinstance(snake_case__ , (float, int) ) for val in locals().values() ): snake_case__ : Optional[int] = f'''Input values must either be float or int: {list(locals().values() )}''' raise TypeError(snake_case__ ) snake_case__ : List[str] = ((x * distance) / (z + distance)) * scale snake_case__ : List[str] = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : str , snake_case__ : float ): if not isinstance(snake_case__ , snake_case__ ): raise TypeError('''Axis must be a str''' ) snake_case__ : List[Any] = locals() del input_variables["axis"] if not all(isinstance(snake_case__ , (float, int) ) for val in input_variables.values() ): snake_case__ : List[str] = ( '''Input values except axis must either be float or int: ''' f'''{list(input_variables.values() )}''' ) raise TypeError(snake_case__ ) snake_case__ : Any = (angle % 3_60) / 4_50 * 1_80 / math.pi if axis == "z": snake_case__ : Union[str, Any] = x * math.cos(snake_case__ ) - y * math.sin(snake_case__ ) snake_case__ : Union[str, Any] = y * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) snake_case__ : Optional[int] = z elif axis == "x": snake_case__ : List[Any] = y * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) snake_case__ : str = z * math.cos(snake_case__ ) + y * math.sin(snake_case__ ) snake_case__ : Optional[int] = x elif axis == "y": snake_case__ : List[Any] = x * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) snake_case__ : List[str] = z * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) snake_case__ : Optional[Any] = y else: raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(F'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''') print(F'''{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }''')
694
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'encoder-decoder' _lowerCAmelCase = True def __init__( self , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" super().__init__(**lowerCamelCase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" snake_case__ : List[str] = kwargs.pop('''encoder''' ) snake_case__ : Any = encoder_config.pop('''model_type''' ) snake_case__ : List[str] = kwargs.pop('''decoder''' ) snake_case__ : str = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase ) snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase ) snake_case__ : str = True @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig: """simple docstring""" logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) snake_case__ : Optional[int] = True snake_case__ : str = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : List[Any] = copy.deepcopy(self.__dict__ ) snake_case__ : List[Any] = self.encoder.to_dict() snake_case__ : str = self.decoder.to_dict() snake_case__ : Any = self.__class__.model_type return output
694
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class snake_case : """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=[1, 1, 2] , lowerCamelCase=1 , lowerCamelCase=32 , lowerCamelCase=4 , lowerCamelCase=8 , lowerCamelCase=37 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=512 , lowerCamelCase=3 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , lowerCamelCase=False , ) -> Optional[int]: """simple docstring""" snake_case__ : List[Any] = parent snake_case__ : Optional[int] = batch_size snake_case__ : List[str] = seq_length snake_case__ : Optional[int] = is_training snake_case__ : Union[str, Any] = use_input_mask snake_case__ : Optional[int] = use_token_type_ids snake_case__ : List[str] = use_labels snake_case__ : int = vocab_size snake_case__ : str = block_sizes snake_case__ : Any = num_decoder_layers snake_case__ : Union[str, Any] = d_model snake_case__ : Union[str, Any] = n_head snake_case__ : Tuple = d_head snake_case__ : List[str] = d_inner snake_case__ : str = hidden_act snake_case__ : Any = hidden_dropout snake_case__ : str = attention_dropout snake_case__ : Optional[int] = activation_dropout snake_case__ : List[Any] = max_position_embeddings snake_case__ : Union[str, Any] = type_vocab_size snake_case__ : str = 2 snake_case__ : Union[str, Any] = num_labels snake_case__ : List[str] = num_choices snake_case__ : Union[str, Any] = scope snake_case__ : Union[str, Any] = initializer_std # Used in the tests to check the size of the first attention layer snake_case__ : Dict = n_head # Used in the tests to check the size of the first hidden state snake_case__ : Optional[int] = self.d_model # Used in the tests to check the number of output hidden states/attentions snake_case__ : int = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: snake_case__ : Dict = self.num_hidden_layers + 2 def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : List[str] = None if self.use_input_mask: snake_case__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Any = None if self.use_token_type_ids: snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case__ : Tuple = None snake_case__ : Union[str, Any] = None snake_case__ : Any = None if self.use_labels: snake_case__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ : Optional[int] = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> str: """simple docstring""" snake_case__ : int = TFFunnelModel(config=lowerCamelCase ) snake_case__ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case__ : Any = model(lowerCamelCase ) snake_case__ : List[str] = [input_ids, input_mask] snake_case__ : str = model(lowerCamelCase ) snake_case__ : Dict = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) snake_case__ : str = False snake_case__ : Optional[int] = TFFunnelModel(config=lowerCamelCase ) snake_case__ : Optional[int] = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) snake_case__ : Any = False snake_case__ : int = TFFunnelModel(config=lowerCamelCase ) snake_case__ : Optional[int] = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> List[str]: """simple docstring""" snake_case__ : Optional[int] = TFFunnelBaseModel(config=lowerCamelCase ) snake_case__ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case__ : Dict = model(lowerCamelCase ) snake_case__ : Dict = [input_ids, input_mask] snake_case__ : Optional[Any] = model(lowerCamelCase ) snake_case__ : Tuple = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) snake_case__ : int = False snake_case__ : int = TFFunnelBaseModel(config=lowerCamelCase ) snake_case__ : int = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) snake_case__ : int = False snake_case__ : Dict = TFFunnelBaseModel(config=lowerCamelCase ) snake_case__ : Optional[Any] = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Dict: """simple docstring""" snake_case__ : str = TFFunnelForPreTraining(config=lowerCamelCase ) snake_case__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case__ : str = model(lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> List[Any]: """simple docstring""" snake_case__ : int = TFFunnelForMaskedLM(config=lowerCamelCase ) snake_case__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case__ : Dict = model(lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Dict: """simple docstring""" snake_case__ : Dict = self.num_labels snake_case__ : Optional[int] = TFFunnelForSequenceClassification(config=lowerCamelCase ) snake_case__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case__ : Tuple = model(lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Optional[int]: """simple docstring""" snake_case__ : str = self.num_choices snake_case__ : Union[str, Any] = TFFunnelForMultipleChoice(config=lowerCamelCase ) snake_case__ : Union[str, Any] = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) snake_case__ : List[str] = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) snake_case__ : Any = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) snake_case__ : Dict = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } snake_case__ : Tuple = model(lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Dict: """simple docstring""" snake_case__ : str = self.num_labels snake_case__ : Dict = TFFunnelForTokenClassification(config=lowerCamelCase ) snake_case__ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case__ : str = model(lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Union[str, Any]: """simple docstring""" snake_case__ : Dict = TFFunnelForQuestionAnswering(config=lowerCamelCase ) snake_case__ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case__ : str = model(lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : Optional[int] = self.prepare_config_and_inputs() ( ( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) , ) : str = config_and_inputs snake_case__ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) _lowerCAmelCase = ( { 'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : List[Any] = TFFunnelModelTester(self ) snake_case__ : Optional[int] = ConfigTester(self , config_class=lowerCamelCase ) def lowercase__ ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase ) def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase ) def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase ) @require_tf class snake_case ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) _lowerCAmelCase = False _lowerCAmelCase = False def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Any = TFFunnelModelTester(self , base=lowerCamelCase ) snake_case__ : int = ConfigTester(self , config_class=lowerCamelCase ) def lowercase__ ( self ) -> int: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase ) def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase )
694
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = "▁" _lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} _lowerCAmelCase : Dict = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } _lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4} class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , ) snake_case__ : int = vocab_file snake_case__ : Optional[Any] = monolingual_vocab_file snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility snake_case__ : Dict = {} snake_case__ : Union[str, Any] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids: snake_case__ : List[str] = cnt cnt += 1 with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): snake_case__ : Optional[int] = line.strip().split()[0] snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids ) if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids: snake_case__ : Any = len(self.fairseq_tokens_to_ids ) snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: """simple docstring""" snake_case__ : int = self.__dict__.copy() snake_case__ : Any = None snake_case__ : int = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case__ : Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case__ : Dict = {} snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : str = [self.cls_token_id] snake_case__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" snake_case__ : List[str] = [self.sep_token_id] snake_case__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self ) -> Optional[int]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase ) -> Optional[int]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowercase__ ( self , lowerCamelCase ) -> str: """simple docstring""" return self.fairseq_ids_to_tokens[index] def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip() return out_string def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : Optional[int] = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case__ : Optional[int] = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase , '''wb''' ) as fi: snake_case__ : Dict = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , lowerCamelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(lowerCamelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
694
1
'''simple docstring''' import logging import os import threading import time try: import warnings except ImportError: _lowerCAmelCase : Any = None try: import msvcrt except ImportError: _lowerCAmelCase : Tuple = None try: import fcntl except ImportError: _lowerCAmelCase : Any = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: _lowerCAmelCase : List[str] = OSError # Data # ------------------------------------------------ _lowerCAmelCase : Optional[Any] = [ "Timeout", "BaseFileLock", "WindowsFileLock", "UnixFileLock", "SoftFileLock", "FileLock", ] _lowerCAmelCase : Union[str, Any] = "3.0.12" _lowerCAmelCase : Dict = None def _A ( ): global _logger snake_case__ : Tuple = _logger or logging.getLogger(__name__ ) return _logger class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase ) -> str: """simple docstring""" snake_case__ : int = lock_file return None def __str__( self ) -> str: """simple docstring""" snake_case__ : Dict = f'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class snake_case : """simple docstring""" def __init__( self , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : str = lock return None def __enter__( self ) -> Tuple: """simple docstring""" return self.lock def __exit__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" self.lock.release() return None class snake_case : """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=-1 , lowerCamelCase=None ) -> int: """simple docstring""" snake_case__ : Tuple = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long snake_case__ : int = self.hash_filename_if_too_long(lowerCamelCase , lowerCamelCase ) # The path to the lock file. snake_case__ : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. snake_case__ : Dict = None # The default timeout value. snake_case__ : Any = timeout # We use this lock primarily for the lock counter. snake_case__ : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. snake_case__ : Any = 0 return None @property def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" return self._lock_file @property def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" return self._timeout @timeout.setter def lowercase__ ( self , lowerCamelCase ) -> List[Any]: """simple docstring""" snake_case__ : Optional[int] = float(lowerCamelCase ) return None def lowercase__ ( self ) -> int: """simple docstring""" raise NotImplementedError() def lowercase__ ( self ) -> Optional[int]: """simple docstring""" raise NotImplementedError() @property def lowercase__ ( self ) -> Dict: """simple docstring""" return self._lock_file_fd is not None def lowercase__ ( self , lowerCamelCase=None , lowerCamelCase=0.05 ) -> str: """simple docstring""" if timeout is None: snake_case__ : Dict = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 snake_case__ : Tuple = id(self ) snake_case__ : Any = self._lock_file snake_case__ : int = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(lowerCamelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: snake_case__ : List[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowercase__ ( self , lowerCamelCase=False ) -> str: """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: snake_case__ : str = id(self ) snake_case__ : Tuple = self._lock_file logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() snake_case__ : List[Any] = 0 logger().debug(f'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self ) -> Optional[Any]: """simple docstring""" self.acquire() return self def __exit__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]: """simple docstring""" self.release() return None def __del__( self ) -> Any: """simple docstring""" self.release(force=lowerCamelCase ) return None def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> str: """simple docstring""" snake_case__ : List[str] = os.path.basename(lowerCamelCase ) if len(lowerCamelCase ) > max_length and max_length > 0: snake_case__ : int = os.path.dirname(lowerCamelCase ) snake_case__ : List[str] = str(hash(lowerCamelCase ) ) snake_case__ : Optional[int] = filename[: max_length - len(lowerCamelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(lowerCamelCase , lowerCamelCase ) else: return path class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=-1 , lowerCamelCase=None ) -> List[str]: """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(lowerCamelCase , timeout=lowerCamelCase , max_filename_length=lowerCamelCase ) snake_case__ : int = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : Union[str, Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: snake_case__ : Optional[Any] = os.open(self._lock_file , lowerCamelCase ) except OSError: pass else: try: msvcrt.locking(lowerCamelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(lowerCamelCase ) else: snake_case__ : Optional[Any] = fd return None def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : List[str] = self._lock_file_fd snake_case__ : Tuple = None msvcrt.locking(lowerCamelCase , msvcrt.LK_UNLCK , 1 ) os.close(lowerCamelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. except OSError: pass return None class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=-1 , lowerCamelCase=None ) -> Dict: """simple docstring""" snake_case__ : str = os.statvfs(os.path.dirname(lowerCamelCase ) ).f_namemax super().__init__(lowerCamelCase , timeout=lowerCamelCase , max_filename_length=lowerCamelCase ) def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC snake_case__ : Any = os.open(self._lock_file , lowerCamelCase ) try: fcntl.flock(lowerCamelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(lowerCamelCase ) else: snake_case__ : Optional[Any] = fd return None def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : Tuple = self._lock_file_fd snake_case__ : int = None fcntl.flock(lowerCamelCase , fcntl.LOCK_UN ) os.close(lowerCamelCase ) return None class snake_case ( __lowerCamelCase ): """simple docstring""" def lowercase__ ( self ) -> List[Any]: """simple docstring""" snake_case__ : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: snake_case__ : List[Any] = os.open(self._lock_file , lowerCamelCase ) except OSError: pass else: snake_case__ : Any = fd return None def lowercase__ ( self ) -> Optional[int]: """simple docstring""" os.close(self._lock_file_fd ) snake_case__ : Tuple = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None _lowerCAmelCase : Tuple = None if msvcrt: _lowerCAmelCase : Tuple = WindowsFileLock elif fcntl: _lowerCAmelCase : List[Any] = UnixFileLock else: _lowerCAmelCase : int = SoftFileLock if warnings is not None: warnings.warn("only soft file lock is available")
694
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline _lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") _lowerCAmelCase : Optional[int] = parser.parse_args() _lowerCAmelCase : Union[str, Any] = "cpu" _lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" _lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model" _lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: _lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) _lowerCAmelCase : Optional[Any] = pipe.to(device) # to channels last _lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last) _lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last) _lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: _lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex _lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4) _lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9 _lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8) _lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status) try: _lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: _lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) _lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) _lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: _lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute _lowerCAmelCase : Tuple = 6_6_6 _lowerCAmelCase : str = torch.Generator(device).manual_seed(seed) _lowerCAmelCase : Dict = {"generator": generator} if args.steps is not None: _lowerCAmelCase : Tuple = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): _lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
694
1
'''simple docstring''' import math def _A ( snake_case__ : list , snake_case__ : int = 0 , snake_case__ : int = 0 ): snake_case__ : Optional[int] = end or len(snake_case__ ) for i in range(snake_case__ , snake_case__ ): snake_case__ : str = i snake_case__ : List[Any] = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: snake_case__ : Optional[Any] = array[temp_index - 1] temp_index -= 1 snake_case__ : Any = temp_index_value return array def _A ( snake_case__ : list , snake_case__ : int , snake_case__ : int ): # Max Heap snake_case__ : Union[str, Any] = index snake_case__ : Tuple = 2 * index + 1 # Left Node snake_case__ : Dict = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: snake_case__ : Optional[Any] = left_index if right_index < heap_size and array[largest] < array[right_index]: snake_case__ : Any = right_index if largest != index: snake_case__ ,snake_case__ : Tuple = array[largest], array[index] heapify(snake_case__ , snake_case__ , snake_case__ ) def _A ( snake_case__ : list ): snake_case__ : Any = len(snake_case__ ) for i in range(n // 2 , -1 , -1 ): heapify(snake_case__ , snake_case__ , snake_case__ ) for i in range(n - 1 , 0 , -1 ): snake_case__ ,snake_case__ : List[Any] = array[0], array[i] heapify(snake_case__ , 0 , snake_case__ ) return array def _A ( snake_case__ : list , snake_case__ : int , snake_case__ : int , snake_case__ : int ): if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def _A ( snake_case__ : list , snake_case__ : int , snake_case__ : int , snake_case__ : int ): snake_case__ : Tuple = low snake_case__ : Optional[int] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i snake_case__ ,snake_case__ : Union[str, Any] = array[j], array[i] i += 1 def _A ( snake_case__ : list ): if len(snake_case__ ) == 0: return array snake_case__ : Any = 2 * math.ceil(math.loga(len(snake_case__ ) ) ) snake_case__ : Optional[int] = 16 return intro_sort(snake_case__ , 0 , len(snake_case__ ) , snake_case__ , snake_case__ ) def _A ( snake_case__ : list , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int ): while end - start > size_threshold: if max_depth == 0: return heap_sort(snake_case__ ) max_depth -= 1 snake_case__ : int = median_of_a(snake_case__ , snake_case__ , start + ((end - start) // 2) + 1 , end - 1 ) snake_case__ : Dict = partition(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) intro_sort(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) snake_case__ : Optional[int] = p return insertion_sort(snake_case__ , snake_case__ , snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : List[str] = input("Enter numbers separated by a comma : ").strip() _lowerCAmelCase : List[str] = [float(item) for item in user_input.split(",")] print(sort(unsorted))
694
'''simple docstring''' import socket def _A ( ): snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) snake_case__ : str = socket.gethostname() snake_case__ : Union[str, Any] = 1_23_12 sock.connect((host, port) ) sock.send(B'''Hello server!''' ) with open('''Received_file''' , '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: snake_case__ : int = sock.recv(10_24 ) if not data: break out_file.write(snake_case__ ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
694
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Any = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : List[str] = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys _lowerCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
694
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 ) if __name__ == "__main__": import doctest doctest.testmod()
694
1
'''simple docstring''' import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _A ( snake_case__ : Union[str, Any] ): # picklable for multiprocessing return x.sum() def _A ( snake_case__ : List[str] ): # picklable for multiprocessing return i + 1 @dataclass class snake_case : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 class snake_case ( __lowerCamelCase ): """simple docstring""" def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : Any = {} snake_case__ : List[Any] = [] snake_case__ : str = 1 snake_case__ : Optional[int] = [1, 2] snake_case__ : int = {'''a''': 1, '''b''': 2} snake_case__ : int = {'''a''': [1, 2], '''b''': [3, 4]} snake_case__ : Union[str, Any] = {'''a''': {'''1''': 1}, '''b''': 2} snake_case__ : Dict = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} snake_case__ : Dict = {} snake_case__ : Optional[Any] = [] snake_case__ : Optional[int] = 2 snake_case__ : Optional[Any] = [2, 3] snake_case__ : int = {'''a''': 2, '''b''': 3} snake_case__ : Optional[Any] = {'''a''': [2, 3], '''b''': [4, 5]} snake_case__ : Tuple = {'''a''': {'''1''': 2}, '''b''': 3} snake_case__ : Optional[Any] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) snake_case__ : List[str] = 2 self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) , lowerCamelCase ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) , lowerCamelCase ) snake_case__ : str = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )} snake_case__ : str = {'''a''': 2, '''b''': 0, '''c''': 2} snake_case__ : Tuple = { '''a''': np.eye(2 ).astype(lowerCamelCase ), '''b''': np.zeros(3 ).astype(lowerCamelCase ), '''c''': np.ones(2 ).astype(lowerCamelCase ), } self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase , map_numpy=lowerCamelCase ) , lowerCamelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(lowerCamelCase , lowerCamelCase , map_numpy=lowerCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(lowerCamelCase , lowerCamelCase , map_numpy=lowerCamelCase , num_proc=lowerCamelCase ) , lowerCamelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(lowerCamelCase , lowerCamelCase , map_numpy=lowerCamelCase , num_proc=lowerCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(lowerCamelCase ): # can't pickle a local lambda map_nested(lambda lowerCamelCase : x + 1 , lowerCamelCase , num_proc=lowerCamelCase ) def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ : str = {'''a''': 1, '''b''': 2} snake_case__ : Any = {'''a''': 3, '''b''': 4} snake_case__ : Any = {'''a''': 5, '''b''': 6} snake_case__ : Union[str, Any] = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase ) ) , lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" class snake_case : """simple docstring""" _lowerCAmelCase = 'bar' snake_case__ : int = Foo() self.assertEqual(foo.my_attr , '''bar''' ) with temporary_assignment(lowerCamelCase , '''my_attr''' , '''BAR''' ): self.assertEqual(foo.my_attr , '''BAR''' ) self.assertEqual(foo.my_attr , '''bar''' ) @pytest.mark.parametrize( '''iterable_length, num_proc, expected_num_proc''' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def _A ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any ): with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch( '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool: snake_case__ : Optional[int] = {f'''{i}''': i for i in range(snake_case__ )} snake_case__ : str = map_nested(lambda snake_case__ : x + 10 , snake_case__ , num_proc=snake_case__ , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class snake_case ( __lowerCamelCase ): """simple docstring""" @require_tf def lowercase__ ( self ) -> int: """simple docstring""" import tensorflow as tf from tensorflow.keras import layers snake_case__ : int = layers.Dense(2 ) def gen_random_output(): snake_case__ : Union[str, Any] = tf.random.uniform((1, 3) ) return model(lowerCamelCase ).numpy() with temp_seed(42 , set_tensorflow=lowerCamelCase ): snake_case__ : Optional[int] = gen_random_output() with temp_seed(42 , set_tensorflow=lowerCamelCase ): snake_case__ : Optional[int] = gen_random_output() snake_case__ : Tuple = gen_random_output() np.testing.assert_equal(lowerCamelCase , lowerCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def lowercase__ ( self ) -> str: """simple docstring""" import torch def gen_random_output(): snake_case__ : Union[str, Any] = torch.nn.Linear(3 , 2 ) snake_case__ : Tuple = torch.rand(1 , 3 ) return model(lowerCamelCase ).detach().numpy() with temp_seed(42 , set_pytorch=lowerCamelCase ): snake_case__ : Optional[int] = gen_random_output() with temp_seed(42 , set_pytorch=lowerCamelCase ): snake_case__ : Optional[int] = gen_random_output() snake_case__ : List[str] = gen_random_output() np.testing.assert_equal(lowerCamelCase , lowerCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def lowercase__ ( self ) -> Tuple: """simple docstring""" def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): snake_case__ : Dict = gen_random_output() with temp_seed(42 ): snake_case__ : List[str] = gen_random_output() snake_case__ : str = gen_random_output() np.testing.assert_equal(lowerCamelCase , lowerCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('''input_data''' , [{}] ) def _A ( snake_case__ : Union[str, Any] ): snake_case__ : Optional[int] = NestedDataStructure(snake_case__ ).data assert output_data == input_data @pytest.mark.parametrize( '''data, expected_output''' , [ ({}, []), ([], []), ('''foo''', ['''foo''']), (['''foo''', '''bar'''], ['''foo''', '''bar''']), ([['''foo''', '''bar''']], ['''foo''', '''bar''']), ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']), ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']), ({'''a''': 1, '''b''': 2}, [1, 2]), ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]), ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]), ] , ) def _A ( snake_case__ : str , snake_case__ : Dict ): snake_case__ : Optional[Any] = NestedDataStructure(snake_case__ ).flatten() assert output == expected_output def _A ( ): snake_case__ : Any = A(x=1 , y='''foobar''' ) snake_case__ : Tuple = {'''x''': 1, '''y''': '''foobar'''} assert asdict(snake_case__ ) == expected_output snake_case__ : str = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]} snake_case__ : Tuple = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]} assert asdict(snake_case__ ) == expected_output with pytest.raises(snake_case__ ): asdict([1, A(x=10 , y='''foo''' )] ) def _A ( snake_case__ : str ): return text.split() def _A ( snake_case__ : str ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _A ( ): with Pool(2 ) as pool: snake_case__ : Tuple = list(iflatmap_unordered(snake_case__ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(snake_case__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: snake_case__ : str = list(iflatmap_unordered(snake_case__ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(snake_case__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: snake_case__ : int = [] for yield_time, content in iflatmap_unordered( snake_case__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(snake_case__ ) assert out.count('''a''' ) == 2 assert out.count('''b''' ) == 2 assert len(snake_case__ ) == 4
694
'''simple docstring''' from math import isqrt def _A ( snake_case__ : int ): return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) ) def _A ( snake_case__ : int = 10**6 ): snake_case__ : str = 0 snake_case__ : List[str] = 1 snake_case__ : str = 7 while prime_candidate < max_prime: primes_count += is_prime(snake_case__ ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
694
1
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json", # See all Marian models at https://huggingface.co/models?filter=marian } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'marian' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , lowerCamelCase=58101 , lowerCamelCase=None , lowerCamelCase=1024 , lowerCamelCase=12 , lowerCamelCase=4096 , lowerCamelCase=16 , lowerCamelCase=12 , lowerCamelCase=4096 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=1024 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=58100 , lowerCamelCase=False , lowerCamelCase=58100 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=True , **lowerCamelCase , ) -> List[str]: """simple docstring""" snake_case__ : Union[str, Any] = vocab_size snake_case__ : str = decoder_vocab_size or vocab_size snake_case__ : Optional[Any] = max_position_embeddings snake_case__ : Any = d_model snake_case__ : Dict = encoder_ffn_dim snake_case__ : Union[str, Any] = encoder_layers snake_case__ : str = encoder_attention_heads snake_case__ : Tuple = decoder_ffn_dim snake_case__ : Any = decoder_layers snake_case__ : Tuple = decoder_attention_heads snake_case__ : Dict = dropout snake_case__ : Tuple = attention_dropout snake_case__ : Optional[int] = activation_dropout snake_case__ : Tuple = activation_function snake_case__ : Optional[Any] = init_std snake_case__ : Any = encoder_layerdrop snake_case__ : Union[str, Any] = decoder_layerdrop snake_case__ : Union[str, Any] = use_cache snake_case__ : List[str] = encoder_layers snake_case__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True snake_case__ : str = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , ) class snake_case ( __lowerCamelCase ): """simple docstring""" @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowercase__ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: snake_case__ : Any = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: snake_case__ : Tuple = {0: '''batch'''} snake_case__ : Dict = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: snake_case__ : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''} snake_case__ : List[str] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowerCamelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. snake_case__ : Optional[Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: snake_case__ ,snake_case__ : Optional[int] = self.num_layers for i in range(lowerCamelCase ): snake_case__ : str = {0: '''batch''', 2: '''past_sequence + sequence'''} snake_case__ : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: snake_case__ : Union[str, Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowercase__ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: snake_case__ : Dict = super().outputs else: snake_case__ : Tuple = super(lowerCamelCase , self ).outputs if self.use_past: snake_case__ ,snake_case__ : str = self.num_layers for i in range(lowerCamelCase ): snake_case__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} snake_case__ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def lowercase__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ) -> Mapping[str, Any]: """simple docstring""" snake_case__ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) # Generate decoder inputs snake_case__ : str = seq_length if not self.use_past else 1 snake_case__ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) snake_case__ : str = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} snake_case__ : int = dict(**lowerCamelCase , **lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch snake_case__ ,snake_case__ : List[str] = common_inputs['''input_ids'''].shape snake_case__ : int = common_inputs['''decoder_input_ids'''].shape[1] snake_case__ ,snake_case__ : Optional[Any] = self.num_attention_heads snake_case__ : List[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) snake_case__ : Optional[int] = decoder_seq_length + 3 snake_case__ : int = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) snake_case__ : List[str] = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 ) snake_case__ : List[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered snake_case__ ,snake_case__ : Union[str, Any] = self.num_layers snake_case__ : Optional[int] = min(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = max(lowerCamelCase , lowerCamelCase ) - min_num_layers snake_case__ : Union[str, Any] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase ), ) ) # TODO: test this. snake_case__ : Dict = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowerCamelCase , lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) ) return common_inputs def lowercase__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ) -> Mapping[str, Any]: """simple docstring""" snake_case__ : Any = self._generate_dummy_inputs_for_encoder_and_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch snake_case__ ,snake_case__ : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values snake_case__ : List[Any] = seqlen + 2 snake_case__ ,snake_case__ : int = self.num_layers snake_case__ ,snake_case__ : str = self.num_attention_heads snake_case__ : Optional[int] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) snake_case__ : Optional[Any] = common_inputs['''attention_mask'''].dtype snake_case__ : Optional[int] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 ) snake_case__ : Tuple = [ (torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase ) ] return common_inputs def lowercase__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ) -> Mapping[str, Any]: """simple docstring""" snake_case__ : str = compute_effective_axis_dimension( lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX snake_case__ : List[Any] = tokenizer.num_special_tokens_to_add(lowerCamelCase ) snake_case__ : Optional[Any] = compute_effective_axis_dimension( lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence snake_case__ : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size snake_case__ : Dict = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) ) return common_inputs def lowercase__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ) -> Mapping[str, Any]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: snake_case__ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase ) else: snake_case__ : Any = self._generate_dummy_inputs_for_causal_lm( lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase ) return common_inputs def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: snake_case__ : str = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) else: snake_case__ : List[str] = super(lowerCamelCase , self )._flatten_past_key_values_( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) @property def lowercase__ ( self ) -> float: """simple docstring""" return 1E-4
694
'''simple docstring''' from sklearn.metrics import fa_score import datasets _lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" _lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" _lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): """simple docstring""" def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]: """simple docstring""" snake_case__ : Union[str, Any] = fa_score( lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase ) return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
694
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> None: """simple docstring""" warnings.warn( '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use VideoMAEImageProcessor instead.''' , lowerCamelCase , ) super().__init__(*lowerCamelCase , **lowerCamelCase )
694
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 42 class snake_case ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Optional[Any] = sample_size # time if time_embedding_type == "fourier": snake_case__ : Optional[int] = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase ) snake_case__ : List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": snake_case__ : Dict = Timesteps( block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase ) snake_case__ : Dict = block_out_channels[0] if use_timestep_embedding: snake_case__ : Any = block_out_channels[0] * 4 snake_case__ : Optional[Any] = TimestepEmbedding( in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , ) snake_case__ : Dict = nn.ModuleList([] ) snake_case__ : List[Any] = None snake_case__ : Union[str, Any] = nn.ModuleList([] ) snake_case__ : List[str] = None # down snake_case__ : Tuple = in_channels for i, down_block_type in enumerate(lowerCamelCase ): snake_case__ : Tuple = output_channel snake_case__ : List[str] = block_out_channels[i] if i == 0: input_channel += extra_in_channels snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1 snake_case__ : Dict = get_down_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(lowerCamelCase ) # mid snake_case__ : Optional[int] = get_mid_block( lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , ) # up snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) ) snake_case__ : Any = reversed_block_out_channels[0] if out_block_type is None: snake_case__ : List[Any] = out_channels else: snake_case__ : Dict = block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase ): snake_case__ : List[str] = output_channel snake_case__ : List[str] = ( reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels ) snake_case__ : List[str] = i == len(lowerCamelCase ) - 1 snake_case__ : str = get_up_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(lowerCamelCase ) snake_case__ : Optional[Any] = output_channel # out snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) snake_case__ : Union[str, Any] = get_out_block( out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]: """simple docstring""" snake_case__ : str = timestep if not torch.is_tensor(lowerCamelCase ): snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0: snake_case__ : Optional[Any] = timesteps[None].to(sample.device ) snake_case__ : Any = self.time_proj(lowerCamelCase ) if self.config.use_timestep_embedding: snake_case__ : Tuple = self.time_mlp(lowerCamelCase ) else: snake_case__ : Union[str, Any] = timestep_embed[..., None] snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down snake_case__ : List[Any] = () for downsample_block in self.down_blocks: snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase ) down_block_res_samples += res_samples # 3. mid if self.mid_block: snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): snake_case__ : str = down_block_res_samples[-1:] snake_case__ : int = down_block_res_samples[:-1] snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase ) # 5. post-process if self.out_block: snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase ) if not return_dict: return (sample,) return UNetaDOutput(sample=lowerCamelCase )
694
1
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING _lowerCAmelCase : Dict = logging.get_logger(__name__) @add_end_docstrings(__lowerCamelCase ) class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]: """simple docstring""" super().__init__(*lowerCamelCase , **lowerCamelCase ) self.check_model_type(lowerCamelCase ) def lowercase__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ) -> str: """simple docstring""" snake_case__ ,snake_case__ : List[Any] = {}, {} if padding is not None: snake_case__ : Dict = padding if truncation is not None: snake_case__ : Union[str, Any] = truncation if top_k is not None: snake_case__ : int = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> Tuple: """simple docstring""" if isinstance(lowerCamelCase , (Image.Image, str) ) and isinstance(lowerCamelCase , lowerCamelCase ): snake_case__ : Dict = {'''image''': image, '''question''': question} else: snake_case__ : List[str] = image snake_case__ : Any = super().__call__(lowerCamelCase , **lowerCamelCase ) return results def lowercase__ ( self , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=False ) -> str: """simple docstring""" snake_case__ : Any = load_image(inputs['''image'''] ) snake_case__ : int = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=lowerCamelCase , truncation=lowerCamelCase ) snake_case__ : List[Any] = self.image_processor(images=lowerCamelCase , return_tensors=self.framework ) model_inputs.update(lowerCamelCase ) return model_inputs def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : Optional[int] = self.model(**lowerCamelCase ) return model_outputs def lowercase__ ( self , lowerCamelCase , lowerCamelCase=5 ) -> Union[str, Any]: """simple docstring""" if top_k > self.model.config.num_labels: snake_case__ : Optional[int] = self.model.config.num_labels if self.framework == "pt": snake_case__ : Union[str, Any] = model_outputs.logits.sigmoid()[0] snake_case__ ,snake_case__ : Any = probs.topk(lowerCamelCase ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) snake_case__ : List[Any] = scores.tolist() snake_case__ : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase , lowerCamelCase )]
694
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!" def _A ( snake_case__ : str , snake_case__ : str ): snake_case__ : Tuple = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 10_24, '''hidden_size''': 7_68, '''max_length''': 5_12, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 10_24, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1E-5, '''token_type_vocab_size''': 2, } snake_case__ : List[str] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py snake_case__ : str = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later snake_case__ : Any = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab snake_case__ : Any = os.path.join(get_home_dir() , '''models''' ) snake_case__ : List[Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ ) snake_case__ : Optional[int] = nlp.model.BERTModel( snake_case__ , len(snake_case__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case__ , use_decoder=snake_case__ , ) original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ ) snake_case__ : Any = original_bort._collect_params_with_prefix() # Build our config 🤗 snake_case__ : Union[str, Any] = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(snake_case__ ), } snake_case__ : Dict = BertConfig.from_dict(snake_case__ ) snake_case__ : Dict = BertForMaskedLM(snake_case__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(snake_case__ : str ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(snake_case__ : List[Any] , snake_case__ : Any ): snake_case__ : Union[str, Any] = hf_param.shape snake_case__ : Any = to_torch(params[gluon_param] ) snake_case__ : Dict = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) snake_case__ : int = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) snake_case__ : str = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): snake_case__ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention snake_case__ : BertSelfAttention = layer.attention.self snake_case__ : Optional[Any] = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) snake_case__ : Dict = check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) snake_case__ : List[str] = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) snake_case__ : int = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output snake_case__ : BertSelfOutput = layer.attention.output snake_case__ : Optional[Any] = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) snake_case__ : List[str] = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) snake_case__ : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) snake_case__ : Any = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate snake_case__ : BertIntermediate = layer.intermediate snake_case__ : int = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) snake_case__ : Optional[int] = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output snake_case__ : BertOutput = layer.output snake_case__ : Any = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) snake_case__ : Tuple = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) snake_case__ : Tuple = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models snake_case__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' ) snake_case__ : str = tokenizer.encode_plus(snake_case__ )['''input_ids'''] # Get gluon output snake_case__ : List[str] = mx.nd.array([input_ids] ) snake_case__ : Optional[int] = original_bort(inputs=snake_case__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(snake_case__ ) snake_case__ : Optional[Any] = BertModel.from_pretrained(snake_case__ ) hf_bort_model.eval() snake_case__ : Optional[Any] = tokenizer.encode_plus(snake_case__ , return_tensors='''pt''' ) snake_case__ : str = hf_bort_model(**snake_case__ )[0] snake_case__ : str = output_gluon[0].asnumpy() snake_case__ : str = output_hf[0].detach().numpy() snake_case__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item() snake_case__ : Optional[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , snake_case__ ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
694
1
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case : """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=32 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=[10, 20, 30, 40] , lowerCamelCase=[2, 2, 3, 2] , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=["stage2", "stage3", "stage4"] , lowerCamelCase=[2, 3, 4] , lowerCamelCase=None , ) -> List[Any]: """simple docstring""" snake_case__ : List[Any] = parent snake_case__ : Any = batch_size snake_case__ : List[Any] = image_size snake_case__ : Optional[int] = num_channels snake_case__ : Any = num_stages snake_case__ : Union[str, Any] = hidden_sizes snake_case__ : Any = depths snake_case__ : Union[str, Any] = is_training snake_case__ : Tuple = use_labels snake_case__ : Union[str, Any] = intermediate_size snake_case__ : Any = hidden_act snake_case__ : List[str] = num_labels snake_case__ : Optional[Any] = initializer_range snake_case__ : Dict = out_features snake_case__ : Dict = out_indices snake_case__ : List[str] = scope def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Tuple = None if self.use_labels: snake_case__ : Tuple = ids_tensor([self.batch_size] , self.num_labels ) snake_case__ : Optional[Any] = self.get_config() return config, pixel_values, labels def lowercase__ ( self ) -> Optional[int]: """simple docstring""" return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ : Tuple = ConvNextVaModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() snake_case__ : Tuple = model(lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ : Dict = ConvNextVaForImageClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() snake_case__ : Optional[Any] = model(lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case__ : Tuple = ConvNextVaBackbone(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() snake_case__ : Tuple = model(lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None snake_case__ : List[Any] = None snake_case__ : Any = ConvNextVaBackbone(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() snake_case__ : Any = model(lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : Dict = self.prepare_config_and_inputs() snake_case__ ,snake_case__ ,snake_case__ : Dict = config_and_inputs snake_case__ : Any = {'''pixel_values''': pixel_values} return config, inputs_dict def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : Any = self.prepare_config_and_inputs() snake_case__ ,snake_case__ ,snake_case__ : Tuple = config_and_inputs snake_case__ : Optional[Any] = {'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) _lowerCAmelCase = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def lowercase__ ( self ) -> List[Any]: """simple docstring""" snake_case__ : Union[str, Any] = ConvNextVaModelTester(self ) snake_case__ : int = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def lowercase__ ( self ) -> List[str]: """simple docstring""" pass def lowercase__ ( self ) -> Tuple: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: snake_case__ ,snake_case__ : int = self.model_tester.prepare_config_and_inputs_with_labels() snake_case__ : str = True if model_class.__name__ in [ *get_values(lowerCamelCase ), *get_values(lowerCamelCase ), ]: continue snake_case__ : List[Any] = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.train() snake_case__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase ) snake_case__ : Tuple = model(**lowerCamelCase ).loss loss.backward() def lowercase__ ( self ) -> int: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: snake_case__ ,snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_with_labels() snake_case__ : Union[str, Any] = False snake_case__ : Dict = True if ( model_class.__name__ in [*get_values(lowerCamelCase ), *get_values(lowerCamelCase )] or not model_class.supports_gradient_checkpointing ): continue snake_case__ : Any = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.gradient_checkpointing_enable() model.train() snake_case__ : int = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase ) snake_case__ : Dict = model(**lowerCamelCase ).loss loss.backward() def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ ,snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Any = model_class(lowerCamelCase ) snake_case__ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Any = [*signature.parameters.keys()] snake_case__ : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def lowercase__ ( self ) -> List[Any]: """simple docstring""" def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ): snake_case__ : Any = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() with torch.no_grad(): snake_case__ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) ) snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ ,snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Dict = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : Any = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) @slow def lowercase__ ( self ) -> List[str]: """simple docstring""" for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : Tuple = ConvNextVaModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def _A ( ): snake_case__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase__ ( self ) -> Dict: """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : Optional[Any] = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(lowerCamelCase ) snake_case__ : Any = self.default_image_processor snake_case__ : List[Any] = prepare_img() snake_case__ : Optional[int] = preprocessor(images=lowerCamelCase , return_tensors='''pt''' ).to(lowerCamelCase ) # forward pass with torch.no_grad(): snake_case__ : str = model(**lowerCamelCase ) # verify the logits snake_case__ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase ) snake_case__ : Dict = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
694
'''simple docstring''' def _A ( snake_case__ : int = 4_00_00_00 ): snake_case__ : int = [] snake_case__ ,snake_case__ : Union[str, Any] = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(snake_case__ ) snake_case__ ,snake_case__ : Any = b, a + b return sum(snake_case__ ) if __name__ == "__main__": print(F'''{solution() = }''')
694
1
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def _A ( snake_case__ : Union[str, Any] ): return getitem, k def _A ( snake_case__ : Any , snake_case__ : Dict ): return setitem, k, v def _A ( snake_case__ : int ): return delitem, k def _A ( snake_case__ : Any , snake_case__ : Tuple , *snake_case__ : Union[str, Any] ): try: return fun(snake_case__ , *snake_case__ ), None except Exception as e: return None, e _lowerCAmelCase : str = ( _set("key_a", "val_a"), _set("key_b", "val_b"), ) _lowerCAmelCase : str = [ _set("key_a", "val_a"), _set("key_a", "val_b"), ] _lowerCAmelCase : int = [ _set("key_a", "val_a"), _set("key_b", "val_b"), _del("key_a"), _del("key_b"), _set("key_a", "val_a"), _del("key_a"), ] _lowerCAmelCase : List[str] = [ _get("key_a"), _del("key_a"), _set("key_a", "val_a"), _del("key_a"), _del("key_a"), _get("key_a"), ] _lowerCAmelCase : str = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] _lowerCAmelCase : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("key_a", "val_b"), ] @pytest.mark.parametrize( '''operations''' , ( pytest.param(_add_items , id='''add items''' ), pytest.param(_overwrite_items , id='''overwrite items''' ), pytest.param(_delete_items , id='''delete items''' ), pytest.param(_access_absent_items , id='''access absent items''' ), pytest.param(_add_with_resize_up , id='''add with resize up''' ), pytest.param(_add_with_resize_down , id='''add with resize down''' ), ) , ) def _A ( snake_case__ : str ): snake_case__ : int = HashMap(initial_block_size=4 ) snake_case__ : List[Any] = {} for _, (fun, *args) in enumerate(snake_case__ ): snake_case__ ,snake_case__ : Tuple = _run_operation(snake_case__ , snake_case__ , *snake_case__ ) snake_case__ ,snake_case__ : Any = _run_operation(snake_case__ , snake_case__ , *snake_case__ ) assert my_res == py_res assert str(snake_case__ ) == str(snake_case__ ) assert set(snake_case__ ) == set(snake_case__ ) assert len(snake_case__ ) == len(snake_case__ ) assert set(my.items() ) == set(py.items() ) def _A ( ): def is_public(snake_case__ : str ) -> bool: return not name.startswith('''_''' ) snake_case__ : Union[str, Any] = {name for name in dir({} ) if is_public(snake_case__ )} snake_case__ : Union[str, Any] = {name for name in dir(HashMap() ) if is_public(snake_case__ )} assert dict_public_names > hash_public_names
694
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: _lowerCAmelCase : Any = None _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = "▁" _lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : int = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}, "tokenizer_file": { "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json" }, } _lowerCAmelCase : Optional[int] = { "google/pegasus-xsum": 5_1_2, } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = PegasusTokenizer _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]: """simple docstring""" snake_case__ : Tuple = offset if additional_special_tokens is not None: if not isinstance(lowerCamelCase , lowerCamelCase ): raise TypeError( f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is''' f''' {type(lowerCamelCase )}''' ) snake_case__ : List[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 ) ] if len(set(lowerCamelCase ) ) != len(lowerCamelCase ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' ) snake_case__ : List[Any] = additional_special_tokens_extended else: snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Union[str, Any] = vocab_file snake_case__ : List[Any] = False if not self.vocab_file else True def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( '''There should be 3 special tokens: mask_token, pad_token, and eos_token +''' f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(lowerCamelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCamelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : int = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ): copyfile(self.vocab_file , lowerCamelCase ) return (out_vocab_file,)
694
1
'''simple docstring''' import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () _lowerCAmelCase : Optional[Any] = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). _lowerCAmelCase : Dict = [0, 2_5, 5_0] _lowerCAmelCase : Union[str, Any] = [2_5, 5_0, 7_5] _lowerCAmelCase : int = fuzz.membership.trimf(X, abca) _lowerCAmelCase : Optional[int] = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. _lowerCAmelCase : Tuple = np.ones(7_5) _lowerCAmelCase : Union[str, Any] = np.zeros((7_5,)) # 1. Union = max(µA(x), µB(x)) _lowerCAmelCase : Any = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) _lowerCAmelCase : Any = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) _lowerCAmelCase : Union[str, Any] = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) _lowerCAmelCase : List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] _lowerCAmelCase : Dict = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) _lowerCAmelCase : Optional[Any] = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] _lowerCAmelCase : Dict = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] _lowerCAmelCase : Any = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title("Young") plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title("Middle aged") plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title("union") plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title("intersection") plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title("complement_a") plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title("difference a/b") plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title("alg_sum") plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title("alg_product") plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title("bdd_sum") plt.grid(True) plt.subplot(4, 3, 1_0) plt.plot(X, bdd_difference) plt.title("bdd_difference") plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
694
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple: """simple docstring""" snake_case__ : Optional[Any] = 1.0 if scale is None else scale snake_case__ : Dict = 0.0 if loc is None else loc super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] ) @property def lowercase__ ( self ) -> Dict: """simple docstring""" return self.base_dist.mean * self.scale + self.loc @property def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" return self.base_dist.variance * self.scale**2 @property def lowercase__ ( self ) -> List[str]: """simple docstring""" return self.variance.sqrt() class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None: """simple docstring""" super().__init__(**lowerCamelCase ) snake_case__ : Tuple = args_dim snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] ) snake_case__ : Optional[int] = domain_map def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]: """simple docstring""" snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj] return self.domain_map(*lowerCamelCase ) class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Tuple = function def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]: """simple docstring""" return self.function(lowerCamelCase , *lowerCamelCase ) class snake_case : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 def __init__( self , lowerCamelCase = 1 ) -> None: """simple docstring""" snake_case__ : Optional[Any] = dim snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim} def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" if self.dim == 1: return self.distribution_class(*lowerCamelCase ) else: return Independent(self.distribution_class(*lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution: """simple docstring""" snake_case__ : List[Any] = self._base_distribution(lowerCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim ) @property def lowercase__ ( self ) -> Tuple: """simple docstring""" return () if self.dim == 1 else (self.dim,) @property def lowercase__ ( self ) -> int: """simple docstring""" return len(self.event_shape ) @property def lowercase__ ( self ) -> float: """simple docstring""" return 0.0 def lowercase__ ( self , lowerCamelCase ) -> nn.Module: """simple docstring""" return ParameterProjection( in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowercase__ ( self , *lowerCamelCase ) -> Any: """simple docstring""" raise NotImplementedError() @staticmethod def lowercase__ ( lowerCamelCase ) -> torch.Tensor: """simple docstring""" return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0 class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1} _lowerCAmelCase = StudentT @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"loc": 1, "scale": 1} _lowerCAmelCase = Normal @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"total_count": 1, "logits": 1} _lowerCAmelCase = NegativeBinomial @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowercase__ ( self , lowerCamelCase ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : str = distr_args if self.dim == 1: return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) else: return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : Optional[Any] = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
1
'''simple docstring''' from __future__ import annotations from typing import Any class snake_case : """simple docstring""" def __init__( self , lowerCamelCase ) -> None: """simple docstring""" snake_case__ : Any = num_of_nodes snake_case__ : list[list[int]] = [] snake_case__ : dict[int, int] = {} def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> None: """simple docstring""" self.m_edges.append([u_node, v_node, weight] ) def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def lowercase__ ( self , lowerCamelCase ) -> None: """simple docstring""" if self.m_component[u_node] != u_node: for k in self.m_component: snake_case__ : str = self.find_component(lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> None: """simple docstring""" if component_size[u_node] <= component_size[v_node]: snake_case__ : str = v_node component_size[v_node] += component_size[u_node] self.set_component(lowerCamelCase ) elif component_size[u_node] >= component_size[v_node]: snake_case__ : Union[str, Any] = self.find_component(lowerCamelCase ) component_size[u_node] += component_size[v_node] self.set_component(lowerCamelCase ) def lowercase__ ( self ) -> None: """simple docstring""" snake_case__ : str = [] snake_case__ : int = 0 snake_case__ : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) snake_case__ : str = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: snake_case__ ,snake_case__ ,snake_case__ : Tuple = edge snake_case__ : List[Any] = self.m_component[u] snake_case__ : List[str] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): snake_case__ : Any = [u, v, w] for edge in minimum_weight_edge: if isinstance(lowerCamelCase , lowerCamelCase ): snake_case__ ,snake_case__ ,snake_case__ : Union[str, Any] = edge snake_case__ : int = self.m_component[u] snake_case__ : List[Any] = self.m_component[v] if u_component != v_component: mst_weight += w self.union(lowerCamelCase , lowerCamelCase , lowerCamelCase ) print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' ) num_of_components -= 1 snake_case__ : Dict = [-1] * self.m_num_of_nodes print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' ) def _A ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' from math import factorial def _A ( snake_case__ : int = 20 ): snake_case__ : int = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... snake_case__ : Union[str, Any] = n // 2 return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(2_0)) else: try: _lowerCAmelCase : Any = int(sys.argv[1]) print(solution(n)) except ValueError: print("Invalid entry - please enter a number.")
694
1
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!" def _A ( snake_case__ : str , snake_case__ : str ): snake_case__ : Tuple = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 10_24, '''hidden_size''': 7_68, '''max_length''': 5_12, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 10_24, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1E-5, '''token_type_vocab_size''': 2, } snake_case__ : List[str] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py snake_case__ : str = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later snake_case__ : Any = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab snake_case__ : Any = os.path.join(get_home_dir() , '''models''' ) snake_case__ : List[Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ ) snake_case__ : Optional[int] = nlp.model.BERTModel( snake_case__ , len(snake_case__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case__ , use_decoder=snake_case__ , ) original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ ) snake_case__ : Any = original_bort._collect_params_with_prefix() # Build our config 🤗 snake_case__ : Union[str, Any] = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(snake_case__ ), } snake_case__ : Dict = BertConfig.from_dict(snake_case__ ) snake_case__ : Dict = BertForMaskedLM(snake_case__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(snake_case__ : str ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(snake_case__ : List[Any] , snake_case__ : Any ): snake_case__ : Union[str, Any] = hf_param.shape snake_case__ : Any = to_torch(params[gluon_param] ) snake_case__ : Dict = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) snake_case__ : int = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) snake_case__ : str = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): snake_case__ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention snake_case__ : BertSelfAttention = layer.attention.self snake_case__ : Optional[Any] = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) snake_case__ : Dict = check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) snake_case__ : List[str] = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) snake_case__ : int = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output snake_case__ : BertSelfOutput = layer.attention.output snake_case__ : Optional[Any] = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) snake_case__ : List[str] = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) snake_case__ : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) snake_case__ : Any = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate snake_case__ : BertIntermediate = layer.intermediate snake_case__ : int = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) snake_case__ : Optional[int] = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output snake_case__ : BertOutput = layer.output snake_case__ : Any = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) snake_case__ : Tuple = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) snake_case__ : Tuple = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models snake_case__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' ) snake_case__ : str = tokenizer.encode_plus(snake_case__ )['''input_ids'''] # Get gluon output snake_case__ : List[str] = mx.nd.array([input_ids] ) snake_case__ : Optional[int] = original_bort(inputs=snake_case__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(snake_case__ ) snake_case__ : Optional[Any] = BertModel.from_pretrained(snake_case__ ) hf_bort_model.eval() snake_case__ : Optional[Any] = tokenizer.encode_plus(snake_case__ , return_tensors='''pt''' ) snake_case__ : str = hf_bort_model(**snake_case__ )[0] snake_case__ : str = output_gluon[0].asnumpy() snake_case__ : str = output_hf[0].detach().numpy() snake_case__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item() snake_case__ : Optional[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , snake_case__ ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
694
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = (EulerDiscreteScheduler,) _lowerCAmelCase = 1_0 def lowercase__ ( self , **lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ : Any = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowerCamelCase ) return config def lowercase__ ( self ) -> List[Any]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCamelCase ) def lowercase__ ( self ) -> str: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Any = self.get_scheduler_config() snake_case__ : int = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ : Dict = torch.manual_seed(0 ) snake_case__ : Any = self.dummy_model() snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ : List[Any] = sample.to(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : int = model(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Optional[int] = output.prev_sample snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : Tuple = self.scheduler_classes[0] snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) snake_case__ : int = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ : Optional[Any] = torch.manual_seed(0 ) snake_case__ : Optional[int] = self.dummy_model() snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ : Optional[int] = sample.to(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Union[str, Any] = output.prev_sample snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 0.0_002 ) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3 def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Optional[int] = self.get_scheduler_config() snake_case__ : List[str] = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase ) snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Optional[int] = self.dummy_model() snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ : Tuple = sample.to(lowerCamelCase ) for t in scheduler.timesteps: snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : str = model(lowerCamelCase , lowerCamelCase ) snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : int = output.prev_sample snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : Dict = self.scheduler_classes[0] snake_case__ : str = self.get_scheduler_config() snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase ) snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Dict = self.dummy_model() snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ : Optional[Any] = sample.to(lowerCamelCase ) for t in scheduler.timesteps: snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Optional[int] = output.prev_sample snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
694
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : Optional[int] = [ "small", "small-base", "medium", "medium-base", "intermediate", "intermediate-base", "large", "large-base", "xlarge", "xlarge-base", ] _lowerCAmelCase : Any = { "vocab_file": { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt", "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt", "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt", "funnel-transformer/medium-base": ( "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt" ), "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt", "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt", "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt", "funnel-transformer/xlarge-base": ( "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json", "funnel-transformer/small-base": ( "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json" ), "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json", "funnel-transformer/medium-base": ( "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json" ), "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json", "funnel-transformer/large-base": ( "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json" ), "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json", "funnel-transformer/xlarge-base": ( "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json" ), }, } _lowerCAmelCase : Union[str, Any] = {F'''funnel-transformer/{name}''': 5_1_2 for name in _model_names} _lowerCAmelCase : Union[str, Any] = {F'''funnel-transformer/{name}''': {"do_lower_case": True} for name in _model_names} class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION _lowerCAmelCase = FunnelTokenizer _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = 2 def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase="<unk>" , lowerCamelCase="<sep>" , lowerCamelCase="<pad>" , lowerCamelCase="<cls>" , lowerCamelCase="<mask>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase="##" , **lowerCamelCase , ) -> str: """simple docstring""" super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , do_lower_case=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , clean_text=lowerCamelCase , tokenize_chinese_chars=lowerCamelCase , strip_accents=lowerCamelCase , wordpieces_prefix=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase ) != tokenize_chinese_chars ): snake_case__ : int = getattr(lowerCamelCase , normalizer_state.pop('''type''' ) ) snake_case__ : Tuple = do_lower_case snake_case__ : Any = strip_accents snake_case__ : Optional[Any] = tokenize_chinese_chars snake_case__ : Optional[int] = normalizer_class(**lowerCamelCase ) snake_case__ : Optional[int] = do_lower_case def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> Dict: """simple docstring""" snake_case__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" snake_case__ : List[Any] = [self.sep_token_id] snake_case__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" snake_case__ : List[Any] = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase ) return tuple(lowerCamelCase )
694
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = ['pixel_values'] def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : int = do_resize snake_case__ : Dict = do_rescale snake_case__ : Any = size_divisor snake_case__ : str = resample super().__init__(**lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase ) # Rounds the height and width down to the closest multiple of size_divisor snake_case__ : Any = height // size_divisor * size_divisor snake_case__ : Union[str, Any] = width // size_divisor * size_divisor snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) return image def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature: """simple docstring""" snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor snake_case__ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase ) if not valid_images(lowerCamelCase ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images] if do_resize: snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images] if do_rescale: snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images] snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images] snake_case__ : str = {'''pixel_values''': images} return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
694
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : List[str] = { "configuration_x_clip": [ "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "XCLIPConfig", "XCLIPTextConfig", "XCLIPVisionConfig", ], "processing_x_clip": ["XCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = [ "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "XCLIPModel", "XCLIPPreTrainedModel", "XCLIPTextModel", "XCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys _lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
694
'''simple docstring''' from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] ) @pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] ) @pytest.mark.parametrize('''revision''' , [None, '''v2'''] ) def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ): snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ ) assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
694
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Dict = { "configuration_time_series_transformer": [ "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimeSeriesTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[Any] = [ "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimeSeriesTransformerForPrediction", "TimeSeriesTransformerModel", "TimeSeriesTransformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys _lowerCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
694
'''simple docstring''' from __future__ import annotations from collections import namedtuple def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
694
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Tuple = "▁" _lowerCAmelCase : str = {"vocab_file": "sentencepiece.bpe.model"} _lowerCAmelCase : List[Any] = { "vocab_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model", } } _lowerCAmelCase : Dict = { "facebook/xglm-564M": 2_0_4_8, } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase = None , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer snake_case__ : List[Any] = 7 snake_case__ : Union[str, Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )] snake_case__ : int = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , ) snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase ) ) snake_case__ : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case__ : int = 1 # Mimic fairseq token-to-id alignment for the first 4 token snake_case__ : Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} snake_case__ : Optional[Any] = len(self.sp_model ) snake_case__ : List[str] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(lowerCamelCase ) snake_case__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Any: """simple docstring""" snake_case__ : List[Any] = self.__dict__.copy() snake_case__ : Dict = None snake_case__ : Dict = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case__ : List[Any] = {} snake_case__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a snake_case__ : str = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" snake_case__ : List[str] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : Optional[Any] = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case__ : List[str] = self.sp_model.PieceToId(lowerCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase__ ( self , lowerCamelCase ) -> Optional[int]: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : Dict = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip() return out_string def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : Union[str, Any] = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase , '''wb''' ) as fi: snake_case__ : List[str] = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase ) return (out_vocab_file,)
694
'''simple docstring''' import os import pytest from transformers.dynamic_module_utils import get_imports _lowerCAmelCase : Union[str, Any] = "\nimport os\n" _lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n" _lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n" _lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n" _lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n" _lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n" _lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n" _lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n" _lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n" _lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n" _lowerCAmelCase : Tuple = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , snake_case__ ) def _A ( snake_case__ : List[str] , snake_case__ : Dict ): snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' ) with open(snake_case__ , '''w''' ) as _tmp_file: _tmp_file.write(snake_case__ ) snake_case__ : int = get_imports(snake_case__ ) assert parsed_imports == ["os"]
694
1
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin _lowerCAmelCase : Any = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class snake_case ( unittest.TestCase , __lowerCamelCase ): """simple docstring""" def lowercase__ ( self ) -> List[Any]: """simple docstring""" snake_case__ : Optional[int] = load_tool('''text-question-answering''' ) self.tool.setup() snake_case__ : Optional[Any] = load_tool('''text-question-answering''' , remote=lowerCamelCase ) def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : str = self.tool(lowerCamelCase , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(lowerCamelCase , '''launched the BigScience Research Workshop''' ) def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : Tuple = self.remote_tool(lowerCamelCase , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(lowerCamelCase , '''launched the BigScience Research Workshop''' ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : int = self.tool(text=lowerCamelCase , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(lowerCamelCase , '''launched the BigScience Research Workshop''' ) def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : Tuple = self.remote_tool(text=lowerCamelCase , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(lowerCamelCase , '''launched the BigScience Research Workshop''' )
694
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Any = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'markuplm' def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str: """simple docstring""" super().__init__( pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Optional[int] = vocab_size snake_case__ : Tuple = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : List[str] = num_attention_heads snake_case__ : List[Any] = hidden_act snake_case__ : Dict = intermediate_size snake_case__ : List[str] = hidden_dropout_prob snake_case__ : Optional[int] = attention_probs_dropout_prob snake_case__ : str = max_position_embeddings snake_case__ : str = type_vocab_size snake_case__ : List[str] = initializer_range snake_case__ : List[str] = layer_norm_eps snake_case__ : Optional[Any] = position_embedding_type snake_case__ : Dict = use_cache snake_case__ : int = classifier_dropout # additional properties snake_case__ : Union[str, Any] = max_depth snake_case__ : Dict = max_xpath_tag_unit_embeddings snake_case__ : Any = max_xpath_subs_unit_embeddings snake_case__ : int = tag_pad_id snake_case__ : Tuple = subs_pad_id snake_case__ : Dict = xpath_unit_hidden_size
694
1
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=4 , ) -> Any: """simple docstring""" snake_case__ : Tuple = parent snake_case__ : Optional[int] = batch_size snake_case__ : int = seq_length snake_case__ : Optional[int] = is_training snake_case__ : int = use_attention_mask snake_case__ : int = use_token_type_ids snake_case__ : Dict = use_labels snake_case__ : Optional[int] = vocab_size snake_case__ : List[str] = hidden_size snake_case__ : Optional[Any] = num_hidden_layers snake_case__ : Optional[Any] = num_attention_heads snake_case__ : Optional[int] = intermediate_size snake_case__ : str = hidden_act snake_case__ : Optional[Any] = hidden_dropout_prob snake_case__ : Optional[Any] = attention_probs_dropout_prob snake_case__ : Any = max_position_embeddings snake_case__ : Dict = type_vocab_size snake_case__ : Optional[int] = type_sequence_label_size snake_case__ : Dict = initializer_range snake_case__ : Optional[int] = num_choices def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : Any = None if self.use_attention_mask: snake_case__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : str = None if self.use_token_type_ids: snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case__ : str = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : Optional[int] = self.prepare_config_and_inputs() snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : Optional[Any] = config_and_inputs snake_case__ : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : List[str] = self.prepare_config_and_inputs() snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : Union[str, Any] = config_and_inputs snake_case__ : List[str] = True snake_case__ : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class snake_case ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase = True _lowerCAmelCase = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowercase__ ( self ) -> List[Any]: """simple docstring""" snake_case__ : List[str] = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case__ : int = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowerCamelCase ) snake_case__ : Any = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase ) @require_flax class snake_case ( unittest.TestCase ): """simple docstring""" @slow def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : int = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowerCamelCase ) snake_case__ : Any = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa ) snake_case__ : Optional[Any] = model(lowerCamelCase )[0] snake_case__ : str = [1, 11, 50265] self.assertEqual(list(output.shape ) , lowerCamelCase ) # compare the actual values for a slice. snake_case__ : List[str] = np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) ) @slow def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : List[str] = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowerCamelCase ) snake_case__ : List[Any] = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa ) snake_case__ : Union[str, Any] = model(lowerCamelCase )[0] # compare the actual values for a slice. snake_case__ : Tuple = np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
694
'''simple docstring''' def _A ( snake_case__ : float ): return 10 - x * x def _A ( snake_case__ : float , snake_case__ : float ): # Bolzano theory in order to find if there is a root between a and b if equation(snake_case__ ) * equation(snake_case__ ) >= 0: raise ValueError('''Wrong space!''' ) snake_case__ : List[str] = a while (b - a) >= 0.01: # Find middle point snake_case__ : Optional[int] = (a + b) / 2 # Check if middle point is root if equation(snake_case__ ) == 0.0: break # Decide the side to repeat the steps if equation(snake_case__ ) * equation(snake_case__ ) < 0: snake_case__ : Dict = c else: snake_case__ : List[str] = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
694
1
'''simple docstring''' import argparse import os import re _lowerCAmelCase : List[str] = "src/diffusers" # Pattern that looks at the indentation in a line. _lowerCAmelCase : Optional[int] = re.compile(R"^(\s*)\S") # Pattern that matches `"key":" and puts `key` in group 0. _lowerCAmelCase : Optional[int] = re.compile(R"^\s*\"([^\"]+)\":") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. _lowerCAmelCase : int = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]") # Pattern that matches `"key",` and puts `key` in group 0. _lowerCAmelCase : List[Any] = re.compile(R"^\s*\"([^\"]+)\",\s*$") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. _lowerCAmelCase : Any = re.compile(R"\[([^\]]+)\]") def _A ( snake_case__ : Optional[Any] ): snake_case__ : Union[str, Any] = _re_indent.search(snake_case__ ) return "" if search is None else search.groups()[0] def _A ( snake_case__ : Any , snake_case__ : List[Any]="" , snake_case__ : Optional[Any]=None , snake_case__ : List[Any]=None ): snake_case__ : Dict = 0 snake_case__ : Optional[Any] = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(snake_case__ ): index += 1 snake_case__ : Tuple = ['''\n'''.join(lines[:index] )] else: snake_case__ : Optional[int] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). snake_case__ : int = [lines[index]] index += 1 while index < len(snake_case__ ) and (end_prompt is None or not lines[index].startswith(snake_case__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(snake_case__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(snake_case__ ) ) if index < len(snake_case__ ) - 1: snake_case__ : Any = [lines[index + 1]] index += 1 else: snake_case__ : int = [] else: blocks.append('''\n'''.join(snake_case__ ) ) snake_case__ : List[str] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(snake_case__ ) > 0: blocks.append('''\n'''.join(snake_case__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(snake_case__ ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def _A ( snake_case__ : Optional[Any] ): def _inner(snake_case__ : List[Any] ): return key(snake_case__ ).lower().replace('''_''' , '''''' ) return _inner def _A ( snake_case__ : str , snake_case__ : Optional[Any]=None ): # If no key is provided, we use a noop. def noop(snake_case__ : Optional[Any] ): return x if key is None: snake_case__ : Union[str, Any] = noop # Constants are all uppercase, they go first. snake_case__ : Any = [obj for obj in objects if key(snake_case__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. snake_case__ : Optional[int] = [obj for obj in objects if key(snake_case__ )[0].isupper() and not key(snake_case__ ).isupper()] # Functions begin with a lowercase, they go last. snake_case__ : Union[str, Any] = [obj for obj in objects if not key(snake_case__ )[0].isupper()] snake_case__ : Tuple = ignore_underscore(snake_case__ ) return sorted(snake_case__ , key=snake_case__ ) + sorted(snake_case__ , key=snake_case__ ) + sorted(snake_case__ , key=snake_case__ ) def _A ( snake_case__ : List[Any] ): # This inner function sort imports between [ ]. def _replace(snake_case__ : Dict ): snake_case__ : Optional[int] = match.groups()[0] if "," not in imports: return f'''[{imports}]''' snake_case__ : Optional[int] = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: snake_case__ : Any = keys[:-1] return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(snake_case__ )] ) + "]" snake_case__ : Optional[int] = import_statement.split('''\n''' ) if len(snake_case__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. snake_case__ : Optional[Any] = 2 if lines[1].strip() == '''[''' else 1 snake_case__ : Dict = [(i, _re_strip_line.search(snake_case__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] snake_case__ : str = sort_objects(snake_case__ , key=lambda snake_case__ : x[1] ) snake_case__ : Optional[int] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(snake_case__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: snake_case__ : Optional[Any] = _re_bracket_content.sub(_replace , lines[1] ) else: snake_case__ : int = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: snake_case__ : Optional[Any] = keys[:-1] snake_case__ : List[Any] = get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(snake_case__ )] ) return "\n".join(snake_case__ ) else: # Finally we have to deal with imports fitting on one line snake_case__ : Any = _re_bracket_content.sub(_replace , snake_case__ ) return import_statement def _A ( snake_case__ : List[str] , snake_case__ : Dict=True ): with open(snake_case__ , '''r''' ) as f: snake_case__ : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 snake_case__ : int = split_code_in_indented_blocks( snake_case__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(snake_case__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. snake_case__ : str = main_blocks[block_idx] snake_case__ : List[Any] = block.split('''\n''' ) # Get to the start of the imports. snake_case__ : List[str] = 0 while line_idx < len(snake_case__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: snake_case__ : str = len(snake_case__ ) else: line_idx += 1 if line_idx >= len(snake_case__ ): continue # Ignore beginning and last line: they don't contain anything. snake_case__ : Optional[Any] = '''\n'''.join(block_lines[line_idx:-1] ) snake_case__ : Optional[Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. snake_case__ : Optional[Any] = split_code_in_indented_blocks(snake_case__ , indent_level=snake_case__ ) # We have two categories of import key: list or _import_structure[key].append/extend snake_case__ : Tuple = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. snake_case__ : Any = [(pattern.search(snake_case__ ).groups()[0] if pattern.search(snake_case__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. snake_case__ : Any = [(i, key) for i, key in enumerate(snake_case__ ) if key is not None] snake_case__ : List[Any] = [x[0] for x in sorted(snake_case__ , key=lambda snake_case__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. snake_case__ : Union[str, Any] = 0 snake_case__ : Tuple = [] for i in range(len(snake_case__ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: snake_case__ : Tuple = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(snake_case__ ) count += 1 # And we put our main block back together with its first and last line. snake_case__ : Optional[int] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(snake_case__ ): if check_only: return True else: print(f'''Overwriting {file}.''' ) with open(snake_case__ , '''w''' ) as f: f.write('''\n'''.join(snake_case__ ) ) def _A ( snake_case__ : Union[str, Any]=True ): snake_case__ : Optional[Any] = [] for root, _, files in os.walk(snake_case__ ): if "__init__.py" in files: snake_case__ : List[Any] = sort_imports(os.path.join(snake_case__ , '''__init__.py''' ) , check_only=snake_case__ ) if result: snake_case__ : Optional[int] = [os.path.join(snake_case__ , '''__init__.py''' )] if len(snake_case__ ) > 0: raise ValueError(f'''Would overwrite {len(snake_case__ )} files, run `make style`.''' ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") _lowerCAmelCase : Any = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
694
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : list[float] , snake_case__ : list[float] ): snake_case__ : Dict = sorted(numsa + numsa ) snake_case__ ,snake_case__ : Tuple = divmod(len(snake_case__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()] _lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()] print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
694
1
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : list ): if not nums: raise ValueError('''List is empty''' ) return sum(snake_case__ ) / len(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = [ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys _lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
694
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'philschmid/bart-large-cnn-samsum' _lowerCAmelCase = ( 'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ' 'and returns a summary of the text.' ) _lowerCAmelCase = 'summarizer' _lowerCAmelCase = AutoTokenizer _lowerCAmelCase = AutoModelForSeqaSeqLM _lowerCAmelCase = ['text'] _lowerCAmelCase = ['text'] def lowercase__ ( self , lowerCamelCase ) -> Tuple: """simple docstring""" return self.pre_processor(lowerCamelCase , return_tensors='''pt''' , truncation=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase ) -> str: """simple docstring""" return self.model.generate(**lowerCamelCase )[0] def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" return self.pre_processor.decode(lowerCamelCase , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase )
694
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'encoder-decoder' _lowerCAmelCase = True def __init__( self , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" super().__init__(**lowerCamelCase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" snake_case__ : List[str] = kwargs.pop('''encoder''' ) snake_case__ : Any = encoder_config.pop('''model_type''' ) snake_case__ : List[str] = kwargs.pop('''decoder''' ) snake_case__ : str = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase ) snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase ) snake_case__ : str = True @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig: """simple docstring""" logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) snake_case__ : Optional[int] = True snake_case__ : str = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : List[Any] = copy.deepcopy(self.__dict__ ) snake_case__ : List[Any] = self.encoder.to_dict() snake_case__ : str = self.decoder.to_dict() snake_case__ : Any = self.__class__.model_type return output
694
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { "configuration_table_transformer": [ "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TableTransformerConfig", "TableTransformerOnnxConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : List[str] = [ "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TableTransformerForObjectDetection", "TableTransformerModel", "TableTransformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_table_transformer import ( TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TableTransformerConfig, TableTransformerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_table_transformer import ( TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TableTransformerForObjectDetection, TableTransformerModel, TableTransformerPreTrainedModel, ) else: import sys _lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
694
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = "▁" _lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} _lowerCAmelCase : Dict = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } _lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4} class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , ) snake_case__ : int = vocab_file snake_case__ : Optional[Any] = monolingual_vocab_file snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility snake_case__ : Dict = {} snake_case__ : Union[str, Any] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids: snake_case__ : List[str] = cnt cnt += 1 with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): snake_case__ : Optional[int] = line.strip().split()[0] snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids ) if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids: snake_case__ : Any = len(self.fairseq_tokens_to_ids ) snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: """simple docstring""" snake_case__ : int = self.__dict__.copy() snake_case__ : Any = None snake_case__ : int = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case__ : Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case__ : Dict = {} snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : str = [self.cls_token_id] snake_case__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" snake_case__ : List[str] = [self.sep_token_id] snake_case__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self ) -> Optional[int]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase ) -> Optional[int]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowercase__ ( self , lowerCamelCase ) -> str: """simple docstring""" return self.fairseq_ids_to_tokens[index] def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip() return out_string def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : Optional[int] = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case__ : Optional[int] = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase , '''wb''' ) as fi: snake_case__ : Dict = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , lowerCamelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(lowerCamelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
694
1
'''simple docstring''' import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _lowerCAmelCase : Optional[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(4_2) _lowerCAmelCase : Any = "sshleifer/student_marian_en_ro_6_1" _lowerCAmelCase : Dict = "sshleifer/tiny-mbart" @require_torch class snake_case ( __lowerCamelCase ): """simple docstring""" def lowercase__ ( self , lowerCamelCase=False , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , ) -> List[Any]: """simple docstring""" snake_case__ : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=lowerCamelCase , num_train_epochs=1 , distributed=lowerCamelCase , extra_args_str=lowerCamelCase , predict_with_generate=lowerCamelCase , do_train=lowerCamelCase , do_eval=lowerCamelCase , do_predict=lowerCamelCase , ) snake_case__ : Any = TrainerState.load_from_json(os.path.join(lowerCamelCase , '''trainer_state.json''' ) ).log_history if not do_eval: return snake_case__ : List[str] = [log for log in logs if '''eval_loss''' in log.keys()] snake_case__ : List[str] = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats snake_case__ : Optional[int] = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , lowerCamelCase ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def lowercase__ ( self ) -> int: """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def lowercase__ ( self ) -> Tuple: """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase ) @require_torch_multi_gpu def lowercase__ ( self ) -> Dict: """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ) -> Tuple: """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ) -> Dict: """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ) -> Optional[int]: """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=lowerCamelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ) -> Dict: """simple docstring""" self.run_seqaseq_quick( distributed=lowerCamelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=lowerCamelCase ) @require_apex @require_torch_gpu def lowercase__ ( self ) -> List[Any]: """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=lowerCamelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def lowercase__ ( self , lowerCamelCase ) -> List[Any]: """simple docstring""" snake_case__ : Tuple = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } snake_case__ : str = experiments[experiment_id] snake_case__ : Union[str, Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} snake_case__ : Dict = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**lowerCamelCase , extra_args_str=data['''extra_args_str'''] ) snake_case__ : Optional[Any] = len(re.findall(lowerCamelCase , cl.err ) ) self.assertEqual(lowerCamelCase , data['''n_matches'''] ) @slow def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : List[str] = self.run_trainer( eval_steps=2 , max_len=128 , model_name=lowerCamelCase , learning_rate=3E-4 , num_train_epochs=10 , distributed=lowerCamelCase , ) # Check metrics snake_case__ : Union[str, Any] = TrainerState.load_from_json(os.path.join(lowerCamelCase , '''trainer_state.json''' ) ).log_history snake_case__ : Dict = [log for log in logs if '''eval_loss''' in log.keys()] snake_case__ : int = eval_metrics[0] snake_case__ : Optional[int] = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , lowerCamelCase ) # test if do_predict saves generations and metrics snake_case__ : str = os.listdir(lowerCamelCase ) snake_case__ : Tuple = {os.path.basename(lowerCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def lowercase__ ( self ) -> List[Any]: """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(lowerCamelCase ) -> Tuple[int, float]: snake_case__ : List[str] = '''--skip_memory_metrics 0''' snake_case__ : int = self.run_trainer( max_len=128 , model_name=lowerCamelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=lowerCamelCase , distributed=lowerCamelCase , extra_args_str=lowerCamelCase , do_eval=lowerCamelCase , do_predict=lowerCamelCase , n_gpus_to_use=1 , ) # Check metrics snake_case__ : List[str] = TrainerState.load_from_json(Path(lowerCamelCase , '''trainer_state.json''' ) ).log_history snake_case__ : List[Any] = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 ) snake_case__ : Any = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 ) snake_case__ : List[str] = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss snake_case__ ,snake_case__ ,snake_case__ : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) snake_case__ ,snake_case__ ,snake_case__ : int = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) snake_case__ : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb snake_case__ : Optional[Any] = gpu_peak_mem_orig + gpu_alloc_mem_orig snake_case__ : Union[str, Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb snake_case__ : List[str] = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings snake_case__ : Any = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( lowerCamelCase , lowerCamelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( lowerCamelCase , lowerCamelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( lowerCamelCase , lowerCamelCase , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 3E-3 , lowerCamelCase = "adafactor" , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = 0 , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = None , ) -> Optional[Any]: """simple docstring""" snake_case__ : Union[str, Any] = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' snake_case__ : Tuple = self.get_auto_remove_tmp_dir() snake_case__ : List[str] = f''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(lowerCamelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(lowerCamelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() snake_case__ : Any = f''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(lowerCamelCase )} '''.split() snake_case__ : List[str] = ''' --do_predict '''.split() snake_case__ : List[str] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += f'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: snake_case__ : Any = get_gpu_count() snake_case__ : str = get_torch_dist_unique_port() snake_case__ : int = f''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() snake_case__ : List[Any] = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowerCamelCase , env=self.get_env() ) else: snake_case__ : List[str] = ['''run_translation.py'''] + args with patch.object(lowerCamelCase , '''argv''' , lowerCamelCase ): main() return output_dir
694
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline _lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") _lowerCAmelCase : Optional[int] = parser.parse_args() _lowerCAmelCase : Union[str, Any] = "cpu" _lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" _lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model" _lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: _lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) _lowerCAmelCase : Optional[Any] = pipe.to(device) # to channels last _lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last) _lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last) _lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: _lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex _lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4) _lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9 _lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8) _lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status) try: _lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: _lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) _lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) _lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: _lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute _lowerCAmelCase : Tuple = 6_6_6 _lowerCAmelCase : str = torch.Generator(device).manual_seed(seed) _lowerCAmelCase : Dict = {"generator": generator} if args.steps is not None: _lowerCAmelCase : Tuple = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): _lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
694
1
'''simple docstring''' import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class snake_case ( __lowerCamelCase ): """simple docstring""" def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : Optional[int] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCamelCase , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(lowerCamelCase , '''depth_multiplier''' ) ) class snake_case : """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=3 , lowerCamelCase=32 , lowerCamelCase=0.25 , lowerCamelCase=8 , lowerCamelCase=8 , lowerCamelCase=6 , lowerCamelCase=32 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="relu6" , lowerCamelCase=1280 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=10 , lowerCamelCase=None , ) -> Optional[int]: """simple docstring""" snake_case__ : List[str] = parent snake_case__ : Dict = batch_size snake_case__ : int = num_channels snake_case__ : Optional[int] = image_size snake_case__ : Union[str, Any] = depth_multiplier snake_case__ : Union[str, Any] = depth_divisible_by snake_case__ : Tuple = min_depth snake_case__ : Optional[int] = expand_ratio snake_case__ : Dict = tf_padding snake_case__ : int = output_stride snake_case__ : Dict = first_layer_is_expansion snake_case__ : List[str] = finegrained_output snake_case__ : List[Any] = hidden_act snake_case__ : Optional[int] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) snake_case__ : Union[str, Any] = classifier_dropout_prob snake_case__ : Any = use_labels snake_case__ : Any = is_training snake_case__ : List[str] = num_labels snake_case__ : Tuple = initializer_range snake_case__ : Optional[int] = scope def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Optional[Any] = None snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) snake_case__ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) snake_case__ : Union[str, Any] = self.get_config() return config, pixel_values, labels, pixel_labels def lowercase__ ( self ) -> int: """simple docstring""" return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ : List[str] = MobileNetVaModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() snake_case__ : List[str] = model(lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : Tuple = self.num_labels snake_case__ : Dict = MobileNetVaForImageClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() snake_case__ : Dict = model(lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any: """simple docstring""" snake_case__ : Optional[Any] = self.num_labels snake_case__ : Optional[int] = MobileNetVaForSemanticSegmentation(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() snake_case__ : List[str] = model(lowerCamelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) snake_case__ : Dict = model(lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : List[str] = self.prepare_config_and_inputs() snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : Optional[int] = config_and_inputs snake_case__ : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) _lowerCAmelCase = ( { 'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification, 'image-segmentation': MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : Any = MobileNetVaModelTester(self ) snake_case__ : Dict = MobileNetVaConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase ) def lowercase__ ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' ) def lowercase__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' ) def lowercase__ ( self ) -> List[str]: """simple docstring""" pass @unittest.skip(reason='''MobileNetV2 does not output attentions''' ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" pass def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ ,snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Optional[int] = model_class(lowerCamelCase ) snake_case__ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Union[str, Any] = [*signature.parameters.keys()] snake_case__ : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ): snake_case__ : Tuple = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() with torch.no_grad(): snake_case__ : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) ) snake_case__ : Optional[Any] = outputs.hidden_states snake_case__ : List[Any] = 16 self.assertEqual(len(lowerCamelCase ) , lowerCamelCase ) snake_case__ ,snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : List[str] = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[str] = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase ) @slow def lowercase__ ( self ) -> int: """simple docstring""" for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : List[str] = MobileNetVaModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def _A ( ): snake_case__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase__ ( self ) -> int: """simple docstring""" return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None ) @slow def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : Dict = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(lowerCamelCase ) snake_case__ : Tuple = self.default_image_processor snake_case__ : Any = prepare_img() snake_case__ : Union[str, Any] = image_processor(images=lowerCamelCase , return_tensors='''pt''' ).to(lowerCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Any = model(**lowerCamelCase ) # verify the logits snake_case__ : Tuple = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , lowerCamelCase ) snake_case__ : Optional[Any] = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) ) @slow def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : Optional[int] = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) snake_case__ : List[Any] = model.to(lowerCamelCase ) snake_case__ : str = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) snake_case__ : List[Any] = prepare_img() snake_case__ : Union[str, Any] = image_processor(images=lowerCamelCase , return_tensors='''pt''' ).to(lowerCamelCase ) # forward pass with torch.no_grad(): snake_case__ : List[Any] = model(**lowerCamelCase ) snake_case__ : Any = outputs.logits # verify the logits snake_case__ : Optional[int] = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , lowerCamelCase ) snake_case__ : Optional[Any] = torch.tensor( [ [[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]], [[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]], [[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]], ] , device=lowerCamelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4 ) )
694
'''simple docstring''' import socket def _A ( ): snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) snake_case__ : str = socket.gethostname() snake_case__ : Union[str, Any] = 1_23_12 sock.connect((host, port) ) sock.send(B'''Hello server!''' ) with open('''Received_file''' , '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: snake_case__ : int = sock.recv(10_24 ) if not data: break out_file.write(snake_case__ ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
694
1
'''simple docstring''' import random def _A ( snake_case__ : int , snake_case__ : float , snake_case__ : bool = False ): snake_case__ : dict = {i: [] for i in range(snake_case__ )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(snake_case__ ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(snake_case__ ): for j in range(i + 1 , snake_case__ ): if random.random() < probability: graph[i].append(snake_case__ ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(snake_case__ ) return graph def _A ( snake_case__ : int ): return { i: [j for j in range(snake_case__ ) if i != j] for i in range(snake_case__ ) } if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 ) if __name__ == "__main__": import doctest doctest.testmod()
694
1
'''simple docstring''' from __future__ import annotations from typing import Any class snake_case : """simple docstring""" def __init__( self , lowerCamelCase = 6 ) -> None: """simple docstring""" snake_case__ : Node | None = None snake_case__ : Node | None = None self.create_linked_list(lowerCamelCase ) def lowercase__ ( self , lowerCamelCase ) -> None: """simple docstring""" snake_case__ : Any = Node() snake_case__ : List[Any] = current_node snake_case__ : Optional[Any] = current_node snake_case__ : int = current_node for _ in range(1 , lowerCamelCase ): snake_case__ : Dict = Node() snake_case__ : Optional[int] = current_node snake_case__ : str = previous_node snake_case__ : Dict = current_node snake_case__ : List[Any] = self.front snake_case__ : Tuple = previous_node def lowercase__ ( self ) -> bool: """simple docstring""" return ( self.front == self.rear and self.front is not None and self.front.data is None ) def lowercase__ ( self ) -> Any | None: """simple docstring""" self.check_can_perform_operation() return self.front.data if self.front else None def lowercase__ ( self , lowerCamelCase ) -> None: """simple docstring""" if self.rear is None: return self.check_is_full() if not self.is_empty(): snake_case__ : Any = self.rear.next if self.rear: snake_case__ : Union[str, Any] = data def lowercase__ ( self ) -> Any: """simple docstring""" self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: snake_case__ : Optional[Any] = self.front.data snake_case__ : List[str] = None return data snake_case__ : int = self.front snake_case__ : str = old_front.next snake_case__ : Tuple = old_front.data snake_case__ : List[Any] = None return data def lowercase__ ( self ) -> None: """simple docstring""" if self.is_empty(): raise Exception('''Empty Queue''' ) def lowercase__ ( self ) -> None: """simple docstring""" if self.rear and self.rear.next == self.front: raise Exception('''Full Queue''' ) class snake_case : """simple docstring""" def __init__( self ) -> None: """simple docstring""" snake_case__ : Any | None = None snake_case__ : Node | None = None snake_case__ : Node | None = None if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' from math import isqrt def _A ( snake_case__ : int ): return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) ) def _A ( snake_case__ : int = 10**6 ): snake_case__ : str = 0 snake_case__ : List[str] = 1 snake_case__ : str = 7 while prime_candidate < max_prime: primes_count += is_prime(snake_case__ ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
694
1
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : Any = logging.get_logger(__name__) set_seed(7_7_0) _lowerCAmelCase : Optional[int] = { "c_attn": "att_proj", "c_proj": "out_proj", "c_fc": "in_proj", "transformer.": "", "h.": "layers.", "ln_1": "layernorm_1", "ln_2": "layernorm_2", "ln_f": "layernorm_final", "wpe": "position_embeds_layer", "wte": "input_embeds_layer", } _lowerCAmelCase : int = { "text_small": { "repo_id": "suno/bark", "file_name": "text.pt", }, "coarse_small": { "repo_id": "suno/bark", "file_name": "coarse.pt", }, "fine_small": { "repo_id": "suno/bark", "file_name": "fine.pt", }, "text": { "repo_id": "suno/bark", "file_name": "text_2.pt", }, "coarse": { "repo_id": "suno/bark", "file_name": "coarse_2.pt", }, "fine": { "repo_id": "suno/bark", "file_name": "fine_2.pt", }, } _lowerCAmelCase : int = os.path.dirname(os.path.abspath(__file__)) _lowerCAmelCase : List[Any] = os.path.join(os.path.expanduser("~"), ".cache") _lowerCAmelCase : List[str] = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0") def _A ( snake_case__ : Any , snake_case__ : Any=False ): snake_case__ : Dict = model_type if use_small: key += "_small" return os.path.join(snake_case__ , REMOTE_MODEL_PATHS[key]['''file_name'''] ) def _A ( snake_case__ : str , snake_case__ : str ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) hf_hub_download(repo_id=snake_case__ , filename=snake_case__ , local_dir=snake_case__ ) def _A ( snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : List[str]=False , snake_case__ : Optional[int]="text" ): if model_type == "text": snake_case__ : Any = BarkSemanticModel snake_case__ : int = BarkSemanticConfig snake_case__ : int = BarkSemanticGenerationConfig elif model_type == "coarse": snake_case__ : List[str] = BarkCoarseModel snake_case__ : Dict = BarkCoarseConfig snake_case__ : Union[str, Any] = BarkCoarseGenerationConfig elif model_type == "fine": snake_case__ : Dict = BarkFineModel snake_case__ : Union[str, Any] = BarkFineConfig snake_case__ : Union[str, Any] = BarkFineGenerationConfig else: raise NotImplementedError() snake_case__ : List[Any] = f'''{model_type}_small''' if use_small else model_type snake_case__ : Tuple = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(snake_case__ ): logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' ) _download(model_info['''repo_id'''] , model_info['''file_name'''] ) snake_case__ : Union[str, Any] = torch.load(snake_case__ , map_location=snake_case__ ) # this is a hack snake_case__ : Any = checkpoint['''model_args'''] if "input_vocab_size" not in model_args: snake_case__ : Dict = model_args['''vocab_size'''] snake_case__ : Any = model_args['''vocab_size'''] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments snake_case__ : Tuple = model_args.pop('''n_head''' ) snake_case__ : Dict = model_args.pop('''n_embd''' ) snake_case__ : Dict = model_args.pop('''n_layer''' ) snake_case__ : str = ConfigClass(**checkpoint['''model_args'''] ) snake_case__ : Dict = ModelClass(config=snake_case__ ) snake_case__ : List[Any] = GenerationConfigClass() snake_case__ : Optional[int] = model_generation_config snake_case__ : List[Any] = checkpoint['''model'''] # fixup checkpoint snake_case__ : int = '''_orig_mod.''' for k, v in list(state_dict.items() ): if k.startswith(snake_case__ ): # replace part of the key with corresponding layer name in HF implementation snake_case__ : str = k[len(snake_case__ ) :] for old_layer_name in new_layer_name_dict: snake_case__ : Union[str, Any] = new_k.replace(snake_case__ , new_layer_name_dict[old_layer_name] ) snake_case__ : Any = state_dict.pop(snake_case__ ) snake_case__ : List[str] = set(state_dict.keys() ) - set(model.state_dict().keys() ) snake_case__ : Tuple = {k for k in extra_keys if not k.endswith('''.attn.bias''' )} snake_case__ : Optional[int] = set(model.state_dict().keys() ) - set(state_dict.keys() ) snake_case__ : List[Any] = {k for k in missing_keys if not k.endswith('''.attn.bias''' )} if len(snake_case__ ) != 0: raise ValueError(f'''extra keys found: {extra_keys}''' ) if len(snake_case__ ) != 0: raise ValueError(f'''missing keys: {missing_keys}''' ) model.load_state_dict(snake_case__ , strict=snake_case__ ) snake_case__ : Optional[int] = model.num_parameters(exclude_embeddings=snake_case__ ) snake_case__ : Union[str, Any] = checkpoint['''best_val_loss'''].item() logger.info(f'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(snake_case__ , 3 )} loss''' ) model.eval() model.to(snake_case__ ) del checkpoint, state_dict return model def _A ( snake_case__ : str , snake_case__ : Union[str, Any]=False , snake_case__ : int="text" ): if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() snake_case__ : List[Any] = '''cpu''' # do conversion on cpu snake_case__ : int = _get_ckpt_path(snake_case__ , use_small=snake_case__ ) snake_case__ : str = _load_model(snake_case__ , snake_case__ , model_type=snake_case__ , use_small=snake_case__ ) # load bark initial model snake_case__ : str = _bark_load_model(snake_case__ , '''cpu''' , model_type=snake_case__ , use_small=snake_case__ ) if model_type == "text": snake_case__ : Tuple = bark_model['''model'''] if model.num_parameters(exclude_embeddings=snake_case__ ) != bark_model.get_num_params(): raise ValueError('''initial and new models don\'t have the same number of parameters''' ) # check if same output as the bark model snake_case__ : Optional[Any] = 5 snake_case__ : Optional[int] = 10 if model_type in ["text", "coarse"]: snake_case__ : Tuple = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int ) snake_case__ : Dict = bark_model(snake_case__ )[0] snake_case__ : str = model(snake_case__ ) # take last logits snake_case__ : Union[str, Any] = output_new_model_total.logits[:, [-1], :] else: snake_case__ : Union[str, Any] = 3 snake_case__ : List[str] = 8 snake_case__ : Optional[int] = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) snake_case__ : Tuple = model(snake_case__ , snake_case__ ) snake_case__ : List[Any] = bark_model(snake_case__ , snake_case__ ) snake_case__ : List[str] = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('''initial and new outputs don\'t have the same shape''' ) if (output_new_model - output_old_model).abs().max().item() > 1E-3: raise ValueError('''initial and new outputs are not equal''' ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) def _A ( snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : str , ): snake_case__ : Tuple = os.path.join(snake_case__ , snake_case__ ) snake_case__ : Any = BarkSemanticConfig.from_pretrained(os.path.join(snake_case__ , '''config.json''' ) ) snake_case__ : Optional[Any] = BarkCoarseConfig.from_pretrained(os.path.join(snake_case__ , '''config.json''' ) ) snake_case__ : Union[str, Any] = BarkFineConfig.from_pretrained(os.path.join(snake_case__ , '''config.json''' ) ) snake_case__ : Optional[int] = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' ) snake_case__ : Union[str, Any] = BarkSemanticModel.from_pretrained(snake_case__ ) snake_case__ : Optional[Any] = BarkCoarseModel.from_pretrained(snake_case__ ) snake_case__ : Optional[int] = BarkFineModel.from_pretrained(snake_case__ ) snake_case__ : Optional[int] = EncodecModel.from_pretrained('''facebook/encodec_24khz''' ) snake_case__ : List[str] = BarkConfig.from_sub_model_configs( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) snake_case__ : Dict = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) snake_case__ : Any = BarkModel(snake_case__ ) snake_case__ : Optional[int] = semantic snake_case__ : str = coarseAcoustic snake_case__ : Dict = fineAcoustic snake_case__ : Optional[Any] = codec snake_case__ : Optional[Any] = bark_generation_config Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) bark.save_pretrained(snake_case__ , repo_id=snake_case__ , push_to_hub=snake_case__ ) if __name__ == "__main__": _lowerCAmelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument("model_type", type=str, help="text, coarse or fine.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.") _lowerCAmelCase : int = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
694
'''simple docstring''' from sklearn.metrics import fa_score import datasets _lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" _lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" _lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): """simple docstring""" def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]: """simple docstring""" snake_case__ : Union[str, Any] = fa_score( lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase ) return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
694
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = ['pixel_values'] def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : int = do_resize snake_case__ : Dict = do_rescale snake_case__ : Any = size_divisor snake_case__ : str = resample super().__init__(**lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase ) # Rounds the height and width down to the closest multiple of size_divisor snake_case__ : Any = height // size_divisor * size_divisor snake_case__ : Union[str, Any] = width // size_divisor * size_divisor snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) return image def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature: """simple docstring""" snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor snake_case__ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase ) if not valid_images(lowerCamelCase ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images] if do_resize: snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images] if do_rescale: snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images] snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images] snake_case__ : str = {'''pixel_values''': images} return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
694
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 42 class snake_case ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Optional[Any] = sample_size # time if time_embedding_type == "fourier": snake_case__ : Optional[int] = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase ) snake_case__ : List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": snake_case__ : Dict = Timesteps( block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase ) snake_case__ : Dict = block_out_channels[0] if use_timestep_embedding: snake_case__ : Any = block_out_channels[0] * 4 snake_case__ : Optional[Any] = TimestepEmbedding( in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , ) snake_case__ : Dict = nn.ModuleList([] ) snake_case__ : List[Any] = None snake_case__ : Union[str, Any] = nn.ModuleList([] ) snake_case__ : List[str] = None # down snake_case__ : Tuple = in_channels for i, down_block_type in enumerate(lowerCamelCase ): snake_case__ : Tuple = output_channel snake_case__ : List[str] = block_out_channels[i] if i == 0: input_channel += extra_in_channels snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1 snake_case__ : Dict = get_down_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(lowerCamelCase ) # mid snake_case__ : Optional[int] = get_mid_block( lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , ) # up snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) ) snake_case__ : Any = reversed_block_out_channels[0] if out_block_type is None: snake_case__ : List[Any] = out_channels else: snake_case__ : Dict = block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase ): snake_case__ : List[str] = output_channel snake_case__ : List[str] = ( reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels ) snake_case__ : List[str] = i == len(lowerCamelCase ) - 1 snake_case__ : str = get_up_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(lowerCamelCase ) snake_case__ : Optional[Any] = output_channel # out snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) snake_case__ : Union[str, Any] = get_out_block( out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]: """simple docstring""" snake_case__ : str = timestep if not torch.is_tensor(lowerCamelCase ): snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0: snake_case__ : Optional[Any] = timesteps[None].to(sample.device ) snake_case__ : Any = self.time_proj(lowerCamelCase ) if self.config.use_timestep_embedding: snake_case__ : Tuple = self.time_mlp(lowerCamelCase ) else: snake_case__ : Union[str, Any] = timestep_embed[..., None] snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down snake_case__ : List[Any] = () for downsample_block in self.down_blocks: snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase ) down_block_res_samples += res_samples # 3. mid if self.mid_block: snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): snake_case__ : str = down_block_res_samples[-1:] snake_case__ : int = down_block_res_samples[:-1] snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase ) # 5. post-process if self.out_block: snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase ) if not return_dict: return (sample,) return UNetaDOutput(sample=lowerCamelCase )
694
1
'''simple docstring''' import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def _A ( snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ): # Initialise PyTorch model snake_case__ : str = TaConfig.from_json_file(snake_case__ ) print(f'''Building PyTorch model from configuration: {config}''' ) snake_case__ : Optional[int] = TaForConditionalGeneration(snake_case__ ) # Load weights from tf checkpoint load_tf_weights_in_ta(snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase : List[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
694
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!" def _A ( snake_case__ : str , snake_case__ : str ): snake_case__ : Tuple = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 10_24, '''hidden_size''': 7_68, '''max_length''': 5_12, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 10_24, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1E-5, '''token_type_vocab_size''': 2, } snake_case__ : List[str] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py snake_case__ : str = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later snake_case__ : Any = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab snake_case__ : Any = os.path.join(get_home_dir() , '''models''' ) snake_case__ : List[Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ ) snake_case__ : Optional[int] = nlp.model.BERTModel( snake_case__ , len(snake_case__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case__ , use_decoder=snake_case__ , ) original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ ) snake_case__ : Any = original_bort._collect_params_with_prefix() # Build our config 🤗 snake_case__ : Union[str, Any] = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(snake_case__ ), } snake_case__ : Dict = BertConfig.from_dict(snake_case__ ) snake_case__ : Dict = BertForMaskedLM(snake_case__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(snake_case__ : str ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(snake_case__ : List[Any] , snake_case__ : Any ): snake_case__ : Union[str, Any] = hf_param.shape snake_case__ : Any = to_torch(params[gluon_param] ) snake_case__ : Dict = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) snake_case__ : int = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) snake_case__ : str = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): snake_case__ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention snake_case__ : BertSelfAttention = layer.attention.self snake_case__ : Optional[Any] = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) snake_case__ : Dict = check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) snake_case__ : List[str] = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) snake_case__ : int = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output snake_case__ : BertSelfOutput = layer.attention.output snake_case__ : Optional[Any] = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) snake_case__ : List[str] = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) snake_case__ : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) snake_case__ : Any = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate snake_case__ : BertIntermediate = layer.intermediate snake_case__ : int = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) snake_case__ : Optional[int] = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output snake_case__ : BertOutput = layer.output snake_case__ : Any = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) snake_case__ : Tuple = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) snake_case__ : Tuple = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models snake_case__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' ) snake_case__ : str = tokenizer.encode_plus(snake_case__ )['''input_ids'''] # Get gluon output snake_case__ : List[str] = mx.nd.array([input_ids] ) snake_case__ : Optional[int] = original_bort(inputs=snake_case__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(snake_case__ ) snake_case__ : Optional[Any] = BertModel.from_pretrained(snake_case__ ) hf_bort_model.eval() snake_case__ : Optional[Any] = tokenizer.encode_plus(snake_case__ , return_tensors='''pt''' ) snake_case__ : str = hf_bort_model(**snake_case__ )[0] snake_case__ : str = output_gluon[0].asnumpy() snake_case__ : str = output_hf[0].detach().numpy() snake_case__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item() snake_case__ : Optional[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , snake_case__ ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
694
1
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path _lowerCAmelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) _lowerCAmelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] _lowerCAmelCase : set[int] = {ord(char) for char in VALID_CHARS} _lowerCAmelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def _A ( snake_case__ : list[int] , snake_case__ : tuple[int, ...] ): snake_case__ : str = "" snake_case__ : int snake_case__ : int snake_case__ : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): snake_case__ : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def _A ( snake_case__ : list[int] ): snake_case__ : list[str] = [] for key in product(snake_case__ , repeat=3 ): snake_case__ : Union[str, Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def _A ( snake_case__ : list[str] , snake_case__ : str ): return [possible for possible in possibles if common_word in possible.lower()] def _A ( snake_case__ : str = "p059_cipher.txt" ): snake_case__ : list[int] snake_case__ : list[str] snake_case__ : str snake_case__ : str snake_case__ : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding='''utf-8''' ) snake_case__ : Optional[Any] = [int(snake_case__ ) for number in data.strip().split(''',''' )] snake_case__ : List[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: snake_case__ : Optional[int] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break snake_case__ : Any = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
694
'''simple docstring''' def _A ( snake_case__ : int = 4_00_00_00 ): snake_case__ : int = [] snake_case__ ,snake_case__ : Union[str, Any] = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(snake_case__ ) snake_case__ ,snake_case__ : Any = b, a + b return sum(snake_case__ ) if __name__ == "__main__": print(F'''{solution() = }''')
694
1
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva _lowerCAmelCase : Union[str, Any] = "" _lowerCAmelCase : Dict = "" _lowerCAmelCase : str = "" _lowerCAmelCase : int = 1 # (0 is vertical, 1 is horizontal) def _A ( ): snake_case__ ,snake_case__ : Tuple = get_dataset(snake_case__ , snake_case__ ) print('''Processing...''' ) snake_case__ ,snake_case__ ,snake_case__ : Dict = update_image_and_anno(snake_case__ , snake_case__ , snake_case__ ) for index, image in enumerate(snake_case__ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' snake_case__ : Dict = random_chars(32 ) snake_case__ : str = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] snake_case__ : str = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(snake_case__ )} with {file_name}''' ) snake_case__ : Optional[int] = [] for anno in new_annos[index]: snake_case__ : Union[str, Any] = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(snake_case__ ) with open(f'''/{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def _A ( snake_case__ : str , snake_case__ : str ): snake_case__ : Union[str, Any] = [] snake_case__ : Any = [] for label_file in glob.glob(os.path.join(snake_case__ , '''*.txt''' ) ): snake_case__ : List[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(snake_case__ ) as in_file: snake_case__ : Optional[int] = in_file.readlines() snake_case__ : List[Any] = os.path.join(snake_case__ , f'''{label_name}.jpg''' ) snake_case__ : Optional[Any] = [] for obj_list in obj_lists: snake_case__ : List[str] = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(snake_case__ ) labels.append(snake_case__ ) return img_paths, labels def _A ( snake_case__ : list , snake_case__ : list , snake_case__ : int = 1 ): snake_case__ : Any = [] snake_case__ : Union[str, Any] = [] snake_case__ : Any = [] for idx in range(len(snake_case__ ) ): snake_case__ : Union[str, Any] = [] snake_case__ : List[Any] = img_list[idx] path_list.append(snake_case__ ) snake_case__ : Any = anno_list[idx] snake_case__ : List[str] = cva.imread(snake_case__ ) if flip_type == 1: snake_case__ : List[Any] = cva.flip(snake_case__ , snake_case__ ) for bbox in img_annos: snake_case__ : Union[str, Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: snake_case__ : Optional[int] = cva.flip(snake_case__ , snake_case__ ) for bbox in img_annos: snake_case__ : str = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(snake_case__ ) new_imgs_list.append(snake_case__ ) return new_imgs_list, new_annos_lists, path_list def _A ( snake_case__ : int = 32 ): assert number_char > 1, "The number of character should greater than 1" snake_case__ : str = ascii_lowercase + digits return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) ) if __name__ == "__main__": main() print("DONE ✅")
694
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: _lowerCAmelCase : Any = None _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = "▁" _lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : int = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}, "tokenizer_file": { "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json" }, } _lowerCAmelCase : Optional[int] = { "google/pegasus-xsum": 5_1_2, } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = PegasusTokenizer _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]: """simple docstring""" snake_case__ : Tuple = offset if additional_special_tokens is not None: if not isinstance(lowerCamelCase , lowerCamelCase ): raise TypeError( f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is''' f''' {type(lowerCamelCase )}''' ) snake_case__ : List[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 ) ] if len(set(lowerCamelCase ) ) != len(lowerCamelCase ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' ) snake_case__ : List[Any] = additional_special_tokens_extended else: snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Union[str, Any] = vocab_file snake_case__ : List[Any] = False if not self.vocab_file else True def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( '''There should be 3 special tokens: mask_token, pad_token, and eos_token +''' f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(lowerCamelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCamelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : int = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ): copyfile(self.vocab_file , lowerCamelCase ) return (out_vocab_file,)
694
1
'''simple docstring''' from collections.abc import Callable import numpy as np def _A ( snake_case__ : Callable , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ): snake_case__ : Dict = int(np.ceil((x_end - xa) / step_size ) ) snake_case__ : Dict = np.zeros((n + 1,) ) snake_case__ : int = ya snake_case__ : Optional[int] = xa for k in range(snake_case__ ): snake_case__ : List[str] = y[k] + step_size * ode_func(snake_case__ , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple: """simple docstring""" snake_case__ : Optional[Any] = 1.0 if scale is None else scale snake_case__ : Dict = 0.0 if loc is None else loc super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] ) @property def lowercase__ ( self ) -> Dict: """simple docstring""" return self.base_dist.mean * self.scale + self.loc @property def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" return self.base_dist.variance * self.scale**2 @property def lowercase__ ( self ) -> List[str]: """simple docstring""" return self.variance.sqrt() class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None: """simple docstring""" super().__init__(**lowerCamelCase ) snake_case__ : Tuple = args_dim snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] ) snake_case__ : Optional[int] = domain_map def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]: """simple docstring""" snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj] return self.domain_map(*lowerCamelCase ) class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Tuple = function def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]: """simple docstring""" return self.function(lowerCamelCase , *lowerCamelCase ) class snake_case : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 def __init__( self , lowerCamelCase = 1 ) -> None: """simple docstring""" snake_case__ : Optional[Any] = dim snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim} def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" if self.dim == 1: return self.distribution_class(*lowerCamelCase ) else: return Independent(self.distribution_class(*lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution: """simple docstring""" snake_case__ : List[Any] = self._base_distribution(lowerCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim ) @property def lowercase__ ( self ) -> Tuple: """simple docstring""" return () if self.dim == 1 else (self.dim,) @property def lowercase__ ( self ) -> int: """simple docstring""" return len(self.event_shape ) @property def lowercase__ ( self ) -> float: """simple docstring""" return 0.0 def lowercase__ ( self , lowerCamelCase ) -> nn.Module: """simple docstring""" return ParameterProjection( in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowercase__ ( self , *lowerCamelCase ) -> Any: """simple docstring""" raise NotImplementedError() @staticmethod def lowercase__ ( lowerCamelCase ) -> torch.Tensor: """simple docstring""" return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0 class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1} _lowerCAmelCase = StudentT @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"loc": 1, "scale": 1} _lowerCAmelCase = Normal @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"total_count": 1, "logits": 1} _lowerCAmelCase = NegativeBinomial @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowercase__ ( self , lowerCamelCase ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : str = distr_args if self.dim == 1: return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) else: return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : Optional[Any] = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
1
'''simple docstring''' def _A ( snake_case__ : int , snake_case__ : int ): if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) snake_case__ : List[Any] = str(bin(snake_case__ ) )[2:] # remove the leading "0b" snake_case__ : Any = str(bin(snake_case__ ) )[2:] # remove the leading "0b" snake_case__ : str = max(len(snake_case__ ) , len(snake_case__ ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(snake_case__ ) , b_binary.zfill(snake_case__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' from math import factorial def _A ( snake_case__ : int = 20 ): snake_case__ : int = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... snake_case__ : Union[str, Any] = n // 2 return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(2_0)) else: try: _lowerCAmelCase : Any = int(sys.argv[1]) print(solution(n)) except ValueError: print("Invalid entry - please enter a number.")
694
1
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = (UniPCMultistepScheduler,) _lowerCAmelCase = (('num_inference_steps', 2_5),) def lowercase__ ( self , **lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : List[Any] = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''solver_type''': '''bh2''', } config.update(**lowerCamelCase ) return config def lowercase__ ( self , lowerCamelCase=0 , **lowerCamelCase ) -> int: """simple docstring""" snake_case__ : Optional[Any] = dict(self.forward_default_kwargs ) snake_case__ : Dict = kwargs.pop('''num_inference_steps''' , lowerCamelCase ) snake_case__ : Union[str, Any] = self.dummy_sample snake_case__ : int = 0.1 * sample snake_case__ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: snake_case__ : Tuple = self.get_scheduler_config(**lowerCamelCase ) snake_case__ : Tuple = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(lowerCamelCase ) # copy over dummy past residuals snake_case__ : List[str] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase ) snake_case__ : Union[str, Any] = scheduler_class.from_pretrained(lowerCamelCase ) new_scheduler.set_timesteps(lowerCamelCase ) # copy over dummy past residuals snake_case__ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order] snake_case__ ,snake_case__ : int = sample, sample for t in range(lowerCamelCase , time_step + scheduler.config.solver_order + 1 ): snake_case__ : Optional[Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample snake_case__ : str = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowercase__ ( self , lowerCamelCase=0 , **lowerCamelCase ) -> str: """simple docstring""" snake_case__ : Optional[int] = dict(self.forward_default_kwargs ) snake_case__ : List[Any] = kwargs.pop('''num_inference_steps''' , lowerCamelCase ) snake_case__ : List[str] = self.dummy_sample snake_case__ : Union[str, Any] = 0.1 * sample snake_case__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: snake_case__ : Union[str, Any] = self.get_scheduler_config() snake_case__ : Any = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) snake_case__ : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase ) snake_case__ : Optional[int] = scheduler_class.from_pretrained(lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) snake_case__ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order] snake_case__ : Optional[int] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample snake_case__ : Dict = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowercase__ ( self , lowerCamelCase=None , **lowerCamelCase ) -> Optional[int]: """simple docstring""" if scheduler is None: snake_case__ : int = self.scheduler_classes[0] snake_case__ : Dict = self.get_scheduler_config(**lowerCamelCase ) snake_case__ : Union[str, Any] = scheduler_class(**lowerCamelCase ) snake_case__ : Tuple = self.scheduler_classes[0] snake_case__ : int = self.get_scheduler_config(**lowerCamelCase ) snake_case__ : str = scheduler_class(**lowerCamelCase ) snake_case__ : Dict = 10 snake_case__ : Union[str, Any] = self.dummy_model() snake_case__ : Dict = self.dummy_sample_deter scheduler.set_timesteps(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : List[Any] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : Optional[Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample return sample def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Dict = dict(self.forward_default_kwargs ) snake_case__ : List[Any] = kwargs.pop('''num_inference_steps''' , lowerCamelCase ) for scheduler_class in self.scheduler_classes: snake_case__ : List[Any] = self.get_scheduler_config() snake_case__ : str = scheduler_class(**lowerCamelCase ) snake_case__ : List[str] = self.dummy_sample snake_case__ : Optional[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowerCamelCase , '''set_timesteps''' ): scheduler.set_timesteps(lowerCamelCase ) elif num_inference_steps is not None and not hasattr(lowerCamelCase , '''set_timesteps''' ): snake_case__ : str = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) snake_case__ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10] snake_case__ : Tuple = dummy_past_residuals[: scheduler.config.solver_order] snake_case__ : List[Any] = scheduler.timesteps[5] snake_case__ : Any = scheduler.timesteps[6] snake_case__ : Dict = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : List[Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) snake_case__ : Tuple = self.full_loop(scheduler=lowerCamelCase ) snake_case__ : List[Any] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_mean.item() - 0.2_464 ) < 1E-3 snake_case__ : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) snake_case__ : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config ) snake_case__ : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config ) snake_case__ : int = UniPCMultistepScheduler.from_config(scheduler.config ) snake_case__ : List[str] = self.full_loop(scheduler=lowerCamelCase ) snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_mean.item() - 0.2_464 ) < 1E-3 def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=lowerCamelCase ) def lowercase__ ( self ) -> int: """simple docstring""" self.check_over_configs(thresholding=lowerCamelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowerCamelCase , prediction_type=lowerCamelCase , sample_max_value=lowerCamelCase , solver_order=lowerCamelCase , solver_type=lowerCamelCase , ) def lowercase__ ( self ) -> Optional[int]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase ) def lowercase__ ( self ) -> List[str]: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowerCamelCase , solver_type=lowerCamelCase , prediction_type=lowerCamelCase , ) snake_case__ : Any = self.full_loop( solver_order=lowerCamelCase , solver_type=lowerCamelCase , prediction_type=lowerCamelCase , ) assert not torch.isnan(lowerCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self ) -> Dict: """simple docstring""" self.check_over_configs(lower_order_final=lowerCamelCase ) self.check_over_configs(lower_order_final=lowerCamelCase ) def lowercase__ ( self ) -> Dict: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=lowerCamelCase , time_step=0 ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : List[Any] = self.full_loop() snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_mean.item() - 0.2_464 ) < 1E-3 def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : List[str] = self.full_loop(prediction_type='''v_prediction''' ) snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_mean.item() - 0.1_014 ) < 1E-3 def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Optional[Any] = self.get_scheduler_config(thresholding=lowerCamelCase , dynamic_thresholding_ratio=0 ) snake_case__ : str = scheduler_class(**lowerCamelCase ) snake_case__ : Optional[Any] = 10 snake_case__ : Optional[Any] = self.dummy_model() snake_case__ : Union[str, Any] = self.dummy_sample_deter.half() scheduler.set_timesteps(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : str = model(lowerCamelCase , lowerCamelCase ) snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample assert sample.dtype == torch.floataa def lowercase__ ( self , **lowerCamelCase ) -> Tuple: """simple docstring""" for scheduler_class in self.scheduler_classes: snake_case__ : Optional[Any] = self.get_scheduler_config(**lowerCamelCase ) snake_case__ : Union[str, Any] = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
694
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = (EulerDiscreteScheduler,) _lowerCAmelCase = 1_0 def lowercase__ ( self , **lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ : Any = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowerCamelCase ) return config def lowercase__ ( self ) -> List[Any]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCamelCase ) def lowercase__ ( self ) -> str: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Any = self.get_scheduler_config() snake_case__ : int = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ : Dict = torch.manual_seed(0 ) snake_case__ : Any = self.dummy_model() snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ : List[Any] = sample.to(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : int = model(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Optional[int] = output.prev_sample snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : Tuple = self.scheduler_classes[0] snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) snake_case__ : int = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ : Optional[Any] = torch.manual_seed(0 ) snake_case__ : Optional[int] = self.dummy_model() snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ : Optional[int] = sample.to(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Union[str, Any] = output.prev_sample snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 0.0_002 ) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3 def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Optional[int] = self.get_scheduler_config() snake_case__ : List[str] = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase ) snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Optional[int] = self.dummy_model() snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ : Tuple = sample.to(lowerCamelCase ) for t in scheduler.timesteps: snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : str = model(lowerCamelCase , lowerCamelCase ) snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : int = output.prev_sample snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : Dict = self.scheduler_classes[0] snake_case__ : str = self.get_scheduler_config() snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase ) snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Dict = self.dummy_model() snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ : Optional[Any] = sample.to(lowerCamelCase ) for t in scheduler.timesteps: snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Optional[int] = output.prev_sample snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
694
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer _lowerCAmelCase : str = logging.get_logger(__name__) _lowerCAmelCase : Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : Dict = { "vocab_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt" ), "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt", "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt", "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt", "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt", }, "tokenizer_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json" ), "google/realm-orqa-nq-openqa": ( "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-nq-reader": ( "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-openqa": ( "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-reader": ( "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json" ), }, } _lowerCAmelCase : List[Any] = { "google/realm-cc-news-pretrained-embedder": 5_1_2, "google/realm-cc-news-pretrained-encoder": 5_1_2, "google/realm-cc-news-pretrained-scorer": 5_1_2, "google/realm-cc-news-pretrained-openqa": 5_1_2, "google/realm-orqa-nq-openqa": 5_1_2, "google/realm-orqa-nq-reader": 5_1_2, "google/realm-orqa-wq-openqa": 5_1_2, "google/realm-orqa-wq-reader": 5_1_2, } _lowerCAmelCase : List[str] = { "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True}, "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-reader": {"do_lower_case": True}, "google/realm-orqa-wq-openqa": {"do_lower_case": True}, "google/realm-orqa-wq-reader": {"do_lower_case": True}, } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = RealmTokenizer def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase="[UNK]" , lowerCamelCase="[SEP]" , lowerCamelCase="[PAD]" , lowerCamelCase="[CLS]" , lowerCamelCase="[MASK]" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> Any: """simple docstring""" super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , do_lower_case=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , tokenize_chinese_chars=lowerCamelCase , strip_accents=lowerCamelCase , **lowerCamelCase , ) snake_case__ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase ) != tokenize_chinese_chars ): snake_case__ : Dict = getattr(lowerCamelCase , normalizer_state.pop('''type''' ) ) snake_case__ : List[Any] = do_lower_case snake_case__ : str = strip_accents snake_case__ : Optional[int] = tokenize_chinese_chars snake_case__ : str = normalizer_class(**lowerCamelCase ) snake_case__ : Optional[int] = do_lower_case def lowercase__ ( self , lowerCamelCase , **lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ : List[Any] = PaddingStrategy.MAX_LENGTH snake_case__ : Any = text snake_case__ : List[str] = kwargs.pop('''text_pair''' , lowerCamelCase ) snake_case__ : Optional[int] = kwargs.pop('''return_tensors''' , lowerCamelCase ) snake_case__ : Tuple = { '''input_ids''': [], '''attention_mask''': [], '''token_type_ids''': [], } for idx, candidate_text in enumerate(lowerCamelCase ): if batch_text_pair is not None: snake_case__ : Optional[Any] = batch_text_pair[idx] else: snake_case__ : Dict = None snake_case__ : Union[str, Any] = super().__call__(lowerCamelCase , lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase ) snake_case__ : Union[str, Any] = encoded_candidates.get('''input_ids''' ) snake_case__ : Union[str, Any] = encoded_candidates.get('''attention_mask''' ) snake_case__ : Any = encoded_candidates.get('''token_type_ids''' ) if encoded_input_ids is not None: output_data["input_ids"].append(lowerCamelCase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(lowerCamelCase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(lowerCamelCase ) snake_case__ : Dict = {key: item for key, item in output_data.items() if len(lowerCamelCase ) != 0} return BatchEncoding(lowerCamelCase , tensor_type=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[Any]: """simple docstring""" snake_case__ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" snake_case__ : Union[str, Any] = [self.sep_token_id] snake_case__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" snake_case__ : Tuple = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase ) return tuple(lowerCamelCase )
694
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = ['pixel_values'] def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : int = do_resize snake_case__ : Dict = do_rescale snake_case__ : Any = size_divisor snake_case__ : str = resample super().__init__(**lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase ) # Rounds the height and width down to the closest multiple of size_divisor snake_case__ : Any = height // size_divisor * size_divisor snake_case__ : Union[str, Any] = width // size_divisor * size_divisor snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) return image def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature: """simple docstring""" snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor snake_case__ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase ) if not valid_images(lowerCamelCase ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images] if do_resize: snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images] if do_rescale: snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images] snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images] snake_case__ : str = {'''pixel_values''': images} return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
694
1
'''simple docstring''' from math import factorial def _A ( snake_case__ : int = 1_00 ): return sum(map(snake_case__ , str(factorial(snake_case__ ) ) ) ) if __name__ == "__main__": print(solution(int(input("Enter the Number: ").strip())))
694
'''simple docstring''' from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] ) @pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] ) @pytest.mark.parametrize('''revision''' , [None, '''v2'''] ) def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ): snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ ) assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
694
1
'''simple docstring''' def _A ( snake_case__ : int ): if n == 1 or not isinstance(snake_case__ , snake_case__ ): return 0 elif n == 2: return 1 else: snake_case__ : Tuple = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def _A ( snake_case__ : int ): snake_case__ : Any = 0 snake_case__ : Union[str, Any] = 2 while digits < n: index += 1 snake_case__ : Optional[int] = len(str(fibonacci(snake_case__ ) ) ) return index def _A ( snake_case__ : int = 10_00 ): return fibonacci_digits_index(snake_case__ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
694
'''simple docstring''' from __future__ import annotations from collections import namedtuple def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
694
1
'''simple docstring''' def _A ( snake_case__ : list[list[int | float]] ): snake_case__ : int = len(snake_case__ ) snake_case__ : Union[str, Any] = len(matrix[0] ) snake_case__ : List[Any] = min(snake_case__ , snake_case__ ) for row in range(snake_case__ ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , snake_case__ ): snake_case__ : str = matrix[col][row] / matrix[row][row] for i in range(snake_case__ , snake_case__ ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows snake_case__ : List[Any] = True for i in range(row + 1 , snake_case__ ): if matrix[i][row] != 0: snake_case__ ,snake_case__ : List[Any] = matrix[i], matrix[row] snake_case__ : Dict = False break if reduce: rank -= 1 for i in range(snake_case__ ): snake_case__ : int = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' import os import pytest from transformers.dynamic_module_utils import get_imports _lowerCAmelCase : Union[str, Any] = "\nimport os\n" _lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n" _lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n" _lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n" _lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n" _lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n" _lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n" _lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n" _lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n" _lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n" _lowerCAmelCase : Tuple = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , snake_case__ ) def _A ( snake_case__ : List[str] , snake_case__ : Dict ): snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' ) with open(snake_case__ , '''w''' ) as _tmp_file: _tmp_file.write(snake_case__ ) snake_case__ : int = get_imports(snake_case__ ) assert parsed_imports == ["os"]
694
1
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : list[float] , snake_case__ : list[float] ): snake_case__ : Dict = sorted(numsa + numsa ) snake_case__ ,snake_case__ : Tuple = divmod(len(snake_case__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()] _lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()] print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
694
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Any = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'markuplm' def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str: """simple docstring""" super().__init__( pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Optional[int] = vocab_size snake_case__ : Tuple = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : List[str] = num_attention_heads snake_case__ : List[Any] = hidden_act snake_case__ : Dict = intermediate_size snake_case__ : List[str] = hidden_dropout_prob snake_case__ : Optional[int] = attention_probs_dropout_prob snake_case__ : str = max_position_embeddings snake_case__ : str = type_vocab_size snake_case__ : List[str] = initializer_range snake_case__ : List[str] = layer_norm_eps snake_case__ : Optional[Any] = position_embedding_type snake_case__ : Dict = use_cache snake_case__ : int = classifier_dropout # additional properties snake_case__ : Union[str, Any] = max_depth snake_case__ : Dict = max_xpath_tag_unit_embeddings snake_case__ : Any = max_xpath_subs_unit_embeddings snake_case__ : int = tag_pad_id snake_case__ : Tuple = subs_pad_id snake_case__ : Dict = xpath_unit_hidden_size
694
1
'''simple docstring''' from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
694
'''simple docstring''' def _A ( snake_case__ : float ): return 10 - x * x def _A ( snake_case__ : float , snake_case__ : float ): # Bolzano theory in order to find if there is a root between a and b if equation(snake_case__ ) * equation(snake_case__ ) >= 0: raise ValueError('''Wrong space!''' ) snake_case__ : List[str] = a while (b - a) >= 0.01: # Find middle point snake_case__ : Optional[int] = (a + b) / 2 # Check if middle point is root if equation(snake_case__ ) == 0.0: break # Decide the side to repeat the steps if equation(snake_case__ ) * equation(snake_case__ ) < 0: snake_case__ : Dict = c else: snake_case__ : List[str] = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
694
1
'''simple docstring''' import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class snake_case ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 1 @register_to_config def __init__( self , lowerCamelCase = 1000 , lowerCamelCase = None ) -> Dict: """simple docstring""" self.set_timesteps(lowerCamelCase ) # standard deviation of the initial noise distribution snake_case__ : List[str] = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. snake_case__ : Union[str, Any] = 4 # running values snake_case__ : List[Any] = [] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Any: """simple docstring""" snake_case__ : Tuple = num_inference_steps snake_case__ : Any = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] snake_case__ : Optional[int] = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: snake_case__ : Tuple = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: snake_case__ : Optional[int] = torch.sin(steps * math.pi / 2 ) ** 2 snake_case__ : Optional[int] = (1.0 - self.betas**2) ** 0.5 snake_case__ : List[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] snake_case__ : Optional[int] = timesteps.to(lowerCamelCase ) snake_case__ : Union[str, Any] = [] def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[SchedulerOutput, Tuple]: """simple docstring""" if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) snake_case__ : Optional[int] = (self.timesteps == timestep).nonzero().item() snake_case__ : Dict = timestep_index + 1 snake_case__ : str = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(lowerCamelCase ) if len(self.ets ) == 1: snake_case__ : Any = self.ets[-1] elif len(self.ets ) == 2: snake_case__ : List[str] = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: snake_case__ : Tuple = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: snake_case__ : Union[str, Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) snake_case__ : Dict = self._get_prev_sample(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> torch.FloatTensor: """simple docstring""" return sample def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]: """simple docstring""" snake_case__ : str = self.alphas[timestep_index] snake_case__ : List[str] = self.betas[timestep_index] snake_case__ : Optional[Any] = self.alphas[prev_timestep_index] snake_case__ : Optional[int] = self.betas[prev_timestep_index] snake_case__ : Any = (sample - sigma * ets) / max(lowerCamelCase , 1E-8 ) snake_case__ : List[Any] = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self ) -> Any: """simple docstring""" return self.config.num_train_timesteps
694
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : list[float] , snake_case__ : list[float] ): snake_case__ : Dict = sorted(numsa + numsa ) snake_case__ ,snake_case__ : Tuple = divmod(len(snake_case__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()] _lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()] print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
694
1
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline _lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") _lowerCAmelCase : Optional[int] = parser.parse_args() _lowerCAmelCase : Union[str, Any] = "cpu" _lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" _lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model" _lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: _lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) _lowerCAmelCase : Optional[Any] = pipe.to(device) # to channels last _lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last) _lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last) _lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: _lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex _lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4) _lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9 _lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8) _lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status) try: _lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: _lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) _lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) _lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: _lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute _lowerCAmelCase : Tuple = 6_6_6 _lowerCAmelCase : str = torch.Generator(device).manual_seed(seed) _lowerCAmelCase : Dict = {"generator": generator} if args.steps is not None: _lowerCAmelCase : Tuple = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): _lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
694
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = [ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys _lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
694
1
'''simple docstring''' class snake_case : """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case__ : List[str] = None snake_case__ : Dict = None snake_case__ : Union[str, Any] = graph self._normalize_graph(lowerCamelCase , lowerCamelCase ) snake_case__ : Optional[int] = len(lowerCamelCase ) snake_case__ : Union[str, Any] = None def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" if sources is int: snake_case__ : Optional[Any] = [sources] if sinks is int: snake_case__ : List[str] = [sinks] if len(lowerCamelCase ) == 0 or len(lowerCamelCase ) == 0: return snake_case__ : Any = sources[0] snake_case__ : Optional[Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(lowerCamelCase ) > 1 or len(lowerCamelCase ) > 1: snake_case__ : List[str] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) snake_case__ : str = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: snake_case__ : List[Any] = max_input_flow snake_case__ : Any = 0 snake_case__ : str = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: snake_case__ : str = max_input_flow snake_case__ : Union[str, Any] = size - 1 def lowercase__ ( self ) -> Dict: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def lowercase__ ( self , lowerCamelCase ) -> List[Any]: """simple docstring""" snake_case__ : str = algorithm(self ) class snake_case : """simple docstring""" def __init__( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : Tuple = flow_network snake_case__ : List[str] = flow_network.verticesCount snake_case__ : Optional[int] = flow_network.sourceIndex snake_case__ : Optional[int] = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that snake_case__ : Tuple = flow_network.graph snake_case__ : List[Any] = False def lowercase__ ( self ) -> Tuple: """simple docstring""" if not self.executed: self._algorithm() snake_case__ : Tuple = True def lowercase__ ( self ) -> int: """simple docstring""" pass class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase ) -> Optional[Any]: """simple docstring""" super().__init__(lowerCamelCase ) # use this to save your result snake_case__ : Optional[int] = -1 def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase ) -> List[Any]: """simple docstring""" super().__init__(lowerCamelCase ) snake_case__ : int = [[0] * self.verticies_count for i in range(self.verticies_count )] snake_case__ : Dict = [0] * self.verticies_count snake_case__ : Optional[Any] = [0] * self.verticies_count def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : Union[str, Any] = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule snake_case__ : List[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list snake_case__ : Union[str, Any] = 0 while i < len(lowerCamelCase ): snake_case__ : Optional[Any] = vertices_list[i] snake_case__ : Tuple = self.heights[vertex_index] self.process_vertex(lowerCamelCase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(lowerCamelCase ) ) snake_case__ : Optional[Any] = 0 else: i += 1 snake_case__ : Optional[Any] = sum(self.preflow[self.source_index] ) def lowercase__ ( self , lowerCamelCase ) -> Optional[Any]: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(lowerCamelCase , lowerCamelCase ) self.relabel(lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : List[Any] = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def lowercase__ ( self , lowerCamelCase ) -> str: """simple docstring""" snake_case__ : Tuple = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): snake_case__ : List[str] = self.heights[to_index] if min_height is not None: snake_case__ : Tuple = min_height + 1 if __name__ == "__main__": _lowerCAmelCase : Any = [0] _lowerCAmelCase : int = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] _lowerCAmelCase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network _lowerCAmelCase : str = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate _lowerCAmelCase : Optional[int] = flow_network.find_maximum_flow() print(F'''maximum flow is {maximum_flow}''')
694
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'encoder-decoder' _lowerCAmelCase = True def __init__( self , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" super().__init__(**lowerCamelCase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" snake_case__ : List[str] = kwargs.pop('''encoder''' ) snake_case__ : Any = encoder_config.pop('''model_type''' ) snake_case__ : List[str] = kwargs.pop('''decoder''' ) snake_case__ : str = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase ) snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase ) snake_case__ : str = True @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig: """simple docstring""" logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) snake_case__ : Optional[int] = True snake_case__ : str = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : List[Any] = copy.deepcopy(self.__dict__ ) snake_case__ : List[Any] = self.encoder.to_dict() snake_case__ : str = self.decoder.to_dict() snake_case__ : Any = self.__class__.model_type return output
694
1
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class snake_case ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : List[Any] = 3 snake_case__ : int = 250 snake_case__ : Any = ids_tensor((batch_size, length) , lowerCamelCase ) snake_case__ : Any = torch.ones((batch_size, length) , device=lowerCamelCase , dtype=torch.float ) / length return input_ids, scores def lowercase__ ( self ) -> List[Any]: """simple docstring""" snake_case__ ,snake_case__ : Tuple = self._get_tensors(5 ) snake_case__ : int = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(lowerCamelCase , lowerCamelCase ) ) snake_case__ ,snake_case__ : List[Any] = self._get_tensors(9 ) self.assertFalse(criteria(lowerCamelCase , lowerCamelCase ) ) snake_case__ ,snake_case__ : List[str] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCamelCase , lowerCamelCase ) ) def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : Tuple = MaxLengthCriteria(max_length=10 ) snake_case__ ,snake_case__ : str = self._get_tensors(5 ) self.assertFalse(criteria(lowerCamelCase , lowerCamelCase ) ) snake_case__ ,snake_case__ : int = self._get_tensors(9 ) self.assertFalse(criteria(lowerCamelCase , lowerCamelCase ) ) snake_case__ ,snake_case__ : str = self._get_tensors(10 ) self.assertTrue(criteria(lowerCamelCase , lowerCamelCase ) ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : List[str] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) snake_case__ ,snake_case__ : int = self._get_tensors(5 ) self.assertFalse(criteria(lowerCamelCase , lowerCamelCase ) ) snake_case__ ,snake_case__ : str = self._get_tensors(9 ) self.assertFalse(criteria(lowerCamelCase , lowerCamelCase ) ) snake_case__ ,snake_case__ : Any = self._get_tensors(10 ) self.assertTrue(criteria(lowerCamelCase , lowerCamelCase ) ) snake_case__ : List[str] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ ,snake_case__ : List[Any] = self._get_tensors(5 ) snake_case__ : List[str] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(lowerCamelCase , lowerCamelCase ) ) snake_case__ : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(lowerCamelCase , lowerCamelCase ) ) def lowercase__ ( self ) -> str: """simple docstring""" validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(lowerCamelCase ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) snake_case__ : Optional[Any] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(lowerCamelCase ) , 1 )
694
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = "▁" _lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} _lowerCAmelCase : Dict = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } _lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4} class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , ) snake_case__ : int = vocab_file snake_case__ : Optional[Any] = monolingual_vocab_file snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility snake_case__ : Dict = {} snake_case__ : Union[str, Any] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids: snake_case__ : List[str] = cnt cnt += 1 with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): snake_case__ : Optional[int] = line.strip().split()[0] snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids ) if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids: snake_case__ : Any = len(self.fairseq_tokens_to_ids ) snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: """simple docstring""" snake_case__ : int = self.__dict__.copy() snake_case__ : Any = None snake_case__ : int = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case__ : Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case__ : Dict = {} snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : str = [self.cls_token_id] snake_case__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" snake_case__ : List[str] = [self.sep_token_id] snake_case__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self ) -> Optional[int]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase ) -> Optional[int]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowercase__ ( self , lowerCamelCase ) -> str: """simple docstring""" return self.fairseq_ids_to_tokens[index] def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip() return out_string def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : Optional[int] = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case__ : Optional[int] = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase , '''wb''' ) as fi: snake_case__ : Dict = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , lowerCamelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(lowerCamelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
694
1
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _A ( snake_case__ : str ): if isinstance(snake_case__ , collections.abc.Iterable ): return x return (x, x) @require_tf class snake_case : """simple docstring""" def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Any: """simple docstring""" pass def lowercase__ ( self ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self ) -> int: """simple docstring""" pass def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> Any: """simple docstring""" snake_case__ : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase ) snake_case__ : Optional[int] = TFVisionTextDualEncoderModel(lowerCamelCase ) snake_case__ : List[Any] = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> Any: """simple docstring""" snake_case__ ,snake_case__ : Optional[Any] = self.get_vision_text_model(lowerCamelCase , lowerCamelCase ) snake_case__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase , text_model=lowerCamelCase ) snake_case__ : Union[str, Any] = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> List[Any]: """simple docstring""" snake_case__ ,snake_case__ : Any = self.get_vision_text_model(lowerCamelCase , lowerCamelCase ) snake_case__ : Tuple = {'''vision_model''': vision_model, '''text_model''': text_model} snake_case__ : str = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) snake_case__ : int = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ ,snake_case__ : Dict = self.get_vision_text_model(lowerCamelCase , lowerCamelCase ) snake_case__ : str = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase , text_model=lowerCamelCase ) snake_case__ : Tuple = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase ) snake_case__ : str = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase ) snake_case__ : Dict = TFVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) snake_case__ : List[Any] = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase ) snake_case__ : str = after_output[0].numpy() snake_case__ : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase , 1E-5 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ ,snake_case__ : Any = self.get_vision_text_model(lowerCamelCase , lowerCamelCase ) snake_case__ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase , text_model=lowerCamelCase ) snake_case__ : int = model( input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , output_attentions=lowerCamelCase ) snake_case__ : Optional[Any] = output.vision_model_output.attentions self.assertEqual(len(lowerCamelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case__ : List[Any] = to_atuple(vision_model.config.image_size ) snake_case__ : List[str] = to_atuple(vision_model.config.patch_size ) snake_case__ : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) snake_case__ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) snake_case__ : Dict = output.text_model_output.attentions self.assertEqual(len(lowerCamelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]: """simple docstring""" snake_case__ : Optional[int] = np.abs((a - b) ).max() self.assertLessEqual(lowerCamelCase , lowerCamelCase , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def lowercase__ ( self ) -> List[Any]: """simple docstring""" snake_case__ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**lowerCamelCase ) def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase ) def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : List[str] = self.prepare_config_and_inputs() self.check_save_load(**lowerCamelCase ) def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : Any = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCamelCase ) @slow def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ ,snake_case__ : str = self.get_pretrained_model_and_inputs() snake_case__ : Optional[Any] = model_a(**lowerCamelCase ) snake_case__ : Any = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCamelCase ) snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) snake_case__ : Any = model_a(**lowerCamelCase ) snake_case__ : Tuple = after_outputs[0].numpy() snake_case__ : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase , 1E-5 ) @require_tf class snake_case ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' ) snake_case__ : int = 13 snake_case__ : Union[str, Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) snake_case__ : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) snake_case__ : List[str] = random_attention_mask([batch_size, 4] ) snake_case__ : int = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : List[Any] = TFViTModel(lowerCamelCase , name='''vision_model''' ) snake_case__ : str = TFBertModel(lowerCamelCase , name='''text_model''' ) return vision_model, text_model def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : Optional[int] = TFViTModelTester(self ) snake_case__ : Union[str, Any] = TFBertModelTester(self ) snake_case__ : List[Any] = vit_model_tester.prepare_config_and_inputs() snake_case__ : Tuple = bert_model_tester.prepare_config_and_inputs() snake_case__ ,snake_case__ ,snake_case__ : List[Any] = vision_config_and_inputs ( ( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) , ) : Optional[Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class snake_case ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" def lowercase__ ( self ) -> List[Any]: """simple docstring""" snake_case__ : str = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' ) snake_case__ : int = 13 snake_case__ : Tuple = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) snake_case__ : Any = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) snake_case__ : int = random_attention_mask([batch_size, 4] ) snake_case__ : List[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ ,snake_case__ : Optional[Any] = self.get_vision_text_model(lowerCamelCase , lowerCamelCase ) snake_case__ : Tuple = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase , text_model=lowerCamelCase ) snake_case__ : Optional[Any] = model( input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , output_attentions=lowerCamelCase ) snake_case__ : List[Any] = output.vision_model_output.attentions self.assertEqual(len(lowerCamelCase ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) snake_case__ : Union[str, Any] = to_atuple(vision_model.config.image_size ) snake_case__ : Optional[int] = to_atuple(vision_model.config.patch_size ) snake_case__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) snake_case__ : str = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) snake_case__ : Optional[Any] = output.text_model_output.attentions self.assertEqual(len(lowerCamelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : Any = TFDeiTModel(lowerCamelCase , name='''vision_model''' ) snake_case__ : str = TFRobertaModel(lowerCamelCase , name='''text_model''' ) return vision_model, text_model def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Any = TFDeiTModelTester(self ) snake_case__ : Union[str, Any] = TFRobertaModelTester(self ) snake_case__ : Optional[int] = vit_model_tester.prepare_config_and_inputs() snake_case__ : Optional[int] = bert_model_tester.prepare_config_and_inputs() snake_case__ ,snake_case__ ,snake_case__ : Union[str, Any] = vision_config_and_inputs ( ( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) , ) : Optional[int] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class snake_case ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' ) snake_case__ : Union[str, Any] = 13 snake_case__ : Any = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) snake_case__ : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) snake_case__ : int = random_attention_mask([batch_size, 4] ) snake_case__ : List[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : Any = TFCLIPVisionModel(lowerCamelCase , name='''vision_model''' ) snake_case__ : Dict = TFBertModel(lowerCamelCase , name='''text_model''' ) return vision_model, text_model def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : int = TFCLIPVisionModelTester(self ) snake_case__ : Tuple = TFBertModelTester(self ) snake_case__ : int = clip_model_tester.prepare_config_and_inputs() snake_case__ : int = bert_model_tester.prepare_config_and_inputs() snake_case__ ,snake_case__ : Union[str, Any] = vision_config_and_inputs ( ( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) ,( snake_case__ ) , ) : Union[str, Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : List[str] = TFVisionTextDualEncoderModel.from_pretrained( '''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=lowerCamelCase ) snake_case__ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) snake_case__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) snake_case__ : Any = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCamelCase , padding=lowerCamelCase , return_tensors='''np''' ) snake_case__ : int = model(**lowerCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) snake_case__ : Optional[int] = np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowerCamelCase , atol=1E-3 ) )
694
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline _lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") _lowerCAmelCase : Optional[int] = parser.parse_args() _lowerCAmelCase : Union[str, Any] = "cpu" _lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" _lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model" _lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: _lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) _lowerCAmelCase : Optional[Any] = pipe.to(device) # to channels last _lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last) _lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last) _lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: _lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex _lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4) _lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9 _lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8) _lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status) try: _lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: _lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) _lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) _lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: _lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute _lowerCAmelCase : Tuple = 6_6_6 _lowerCAmelCase : str = torch.Generator(device).manual_seed(seed) _lowerCAmelCase : Dict = {"generator": generator} if args.steps is not None: _lowerCAmelCase : Tuple = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): _lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
694
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: _lowerCAmelCase : Any = None _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = "▁" _lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : int = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}, "tokenizer_file": { "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json" }, } _lowerCAmelCase : Optional[int] = { "google/pegasus-xsum": 5_1_2, } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = PegasusTokenizer _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]: """simple docstring""" snake_case__ : Tuple = offset if additional_special_tokens is not None: if not isinstance(lowerCamelCase , lowerCamelCase ): raise TypeError( f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is''' f''' {type(lowerCamelCase )}''' ) snake_case__ : List[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 ) ] if len(set(lowerCamelCase ) ) != len(lowerCamelCase ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' ) snake_case__ : List[Any] = additional_special_tokens_extended else: snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Union[str, Any] = vocab_file snake_case__ : List[Any] = False if not self.vocab_file else True def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( '''There should be 3 special tokens: mask_token, pad_token, and eos_token +''' f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(lowerCamelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCamelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : int = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ): copyfile(self.vocab_file , lowerCamelCase ) return (out_vocab_file,)
694
'''simple docstring''' import socket def _A ( ): snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) snake_case__ : str = socket.gethostname() snake_case__ : Union[str, Any] = 1_23_12 sock.connect((host, port) ) sock.send(B'''Hello server!''' ) with open('''Received_file''' , '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: snake_case__ : int = sock.recv(10_24 ) if not data: break out_file.write(snake_case__ ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
694
1
'''simple docstring''' import numpy class snake_case : """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase ) -> None: """simple docstring""" snake_case__ : List[str] = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. snake_case__ : str = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. snake_case__ : Any = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. snake_case__ : Optional[Any] = numpy.random.rand(3 , 1 ) # Real output values provided. snake_case__ : Tuple = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. snake_case__ : Any = numpy.zeros(output_array.shape ) def lowercase__ ( self ) -> numpy.ndarray: """simple docstring""" snake_case__ : Tuple = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. snake_case__ : Tuple = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. snake_case__ : Tuple = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowercase__ ( self ) -> None: """simple docstring""" snake_case__ : List[str] = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) snake_case__ : List[str] = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) snake_case__ : Optional[int] = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> None: """simple docstring""" for iteration in range(1 , iterations + 1 ): snake_case__ : int = self.feedforward() self.back_propagation() if give_loss: snake_case__ : Tuple = numpy.mean(numpy.square(output - self.feedforward() ) ) print(f'''Iteration {iteration} Loss: {loss}''' ) def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : List[str] = input_arr snake_case__ : Tuple = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) snake_case__ : List[Any] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) snake_case__ : Union[str, Any] = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _A ( snake_case__ : numpy.ndarray ): return 1 / (1 + numpy.exp(-value )) def _A ( snake_case__ : numpy.ndarray ): return (value) * (1 - (value)) def _A ( ): snake_case__ : List[Any] = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. snake_case__ : List[str] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. snake_case__ : Union[str, Any] = TwoHiddenLayerNeuralNetwork( input_array=snake_case__ , output_array=snake_case__ ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=snake_case__ , iterations=10 , give_loss=snake_case__ ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
694
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 ) if __name__ == "__main__": import doctest doctest.testmod()
694
1
'''simple docstring''' # flake8: noqa # Lint as: python3 _lowerCAmelCase : Optional[int] = [ "VerificationMode", "Version", "disable_progress_bar", "enable_progress_bar", "is_progress_bar_enabled", "experimental", ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
694
'''simple docstring''' from math import isqrt def _A ( snake_case__ : int ): return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) ) def _A ( snake_case__ : int = 10**6 ): snake_case__ : str = 0 snake_case__ : List[str] = 1 snake_case__ : str = 7 while prime_candidate < max_prime: primes_count += is_prime(snake_case__ ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
694
1
'''simple docstring''' from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ) -> Union[str, Any]: """simple docstring""" super().__init__( features=lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase , streaming=lowerCamelCase , num_proc=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Any = Generator( cache_dir=lowerCamelCase , features=lowerCamelCase , generator=lowerCamelCase , gen_kwargs=lowerCamelCase , **lowerCamelCase , ) def lowercase__ ( self ) -> str: """simple docstring""" if self.streaming: snake_case__ : Optional[int] = self.builder.as_streaming_dataset(split='''train''' ) # Build regular (map-style) dataset else: snake_case__ : List[str] = None snake_case__ : Union[str, Any] = None snake_case__ : str = None snake_case__ : Tuple = None self.builder.download_and_prepare( download_config=lowerCamelCase , download_mode=lowerCamelCase , verification_mode=lowerCamelCase , base_path=lowerCamelCase , num_proc=self.num_proc , ) snake_case__ : str = self.builder.as_dataset( split='''train''' , verification_mode=lowerCamelCase , in_memory=self.keep_in_memory ) return dataset
694
'''simple docstring''' from sklearn.metrics import fa_score import datasets _lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" _lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" _lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): """simple docstring""" def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]: """simple docstring""" snake_case__ : Union[str, Any] = fa_score( lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase ) return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
694
1
'''simple docstring''' import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _A ( snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : int , snake_case__ : int ): if isinstance(snake_case__ , snake_case__ ): snake_case__ : List[Any] = np.full((len(snake_case__ ), sequence_length, 2) , snake_case__ ) else: snake_case__ : Any = np.full((len(snake_case__ ), sequence_length) , snake_case__ ) for i, tensor in enumerate(snake_case__ ): if padding_side == "right": if isinstance(snake_case__ , snake_case__ ): snake_case__ : Union[str, Any] = tensor[:sequence_length] else: snake_case__ : Optional[Any] = tensor[:sequence_length] else: if isinstance(snake_case__ , snake_case__ ): snake_case__ : List[Any] = tensor[:sequence_length] else: snake_case__ : Any = tensor[:sequence_length] return out_tensor.tolist() def _A ( snake_case__ : List[Any] ): snake_case__ : Dict = ord(snake_case__ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True snake_case__ : List[Any] = unicodedata.category(snake_case__ ) if cat.startswith('''P''' ): return True return False @dataclass class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = True _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = -1_0_0 _lowerCAmelCase = "pt" def lowercase__ ( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" import torch snake_case__ : int = '''label''' if '''label''' in features[0].keys() else '''labels''' snake_case__ : Optional[int] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None snake_case__ : Tuple = self.tokenizer.pad( lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch snake_case__ : Optional[int] = torch.tensor(batch['''entity_ids'''] ).shape[1] snake_case__ : Any = self.tokenizer.padding_side if padding_side == "right": snake_case__ : Dict = [ list(lowerCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(lowerCamelCase )) for label in labels ] else: snake_case__ : Dict = [ [self.label_pad_token_id] * (sequence_length - len(lowerCamelCase )) + list(lowerCamelCase ) for label in labels ] snake_case__ : str = [feature['''ner_tags'''] for feature in features] snake_case__ : List[str] = padding_tensor(lowerCamelCase , -1 , lowerCamelCase , lowerCamelCase ) snake_case__ : str = [feature['''original_entity_spans'''] for feature in features] snake_case__ : Union[str, Any] = padding_tensor(lowerCamelCase , (-1, -1) , lowerCamelCase , lowerCamelCase ) snake_case__ : Optional[Any] = {k: torch.tensor(lowerCamelCase , dtype=torch.intaa ) for k, v in batch.items()} return batch
694
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 42 class snake_case ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Optional[Any] = sample_size # time if time_embedding_type == "fourier": snake_case__ : Optional[int] = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase ) snake_case__ : List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": snake_case__ : Dict = Timesteps( block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase ) snake_case__ : Dict = block_out_channels[0] if use_timestep_embedding: snake_case__ : Any = block_out_channels[0] * 4 snake_case__ : Optional[Any] = TimestepEmbedding( in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , ) snake_case__ : Dict = nn.ModuleList([] ) snake_case__ : List[Any] = None snake_case__ : Union[str, Any] = nn.ModuleList([] ) snake_case__ : List[str] = None # down snake_case__ : Tuple = in_channels for i, down_block_type in enumerate(lowerCamelCase ): snake_case__ : Tuple = output_channel snake_case__ : List[str] = block_out_channels[i] if i == 0: input_channel += extra_in_channels snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1 snake_case__ : Dict = get_down_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(lowerCamelCase ) # mid snake_case__ : Optional[int] = get_mid_block( lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , ) # up snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) ) snake_case__ : Any = reversed_block_out_channels[0] if out_block_type is None: snake_case__ : List[Any] = out_channels else: snake_case__ : Dict = block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase ): snake_case__ : List[str] = output_channel snake_case__ : List[str] = ( reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels ) snake_case__ : List[str] = i == len(lowerCamelCase ) - 1 snake_case__ : str = get_up_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(lowerCamelCase ) snake_case__ : Optional[Any] = output_channel # out snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) snake_case__ : Union[str, Any] = get_out_block( out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]: """simple docstring""" snake_case__ : str = timestep if not torch.is_tensor(lowerCamelCase ): snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0: snake_case__ : Optional[Any] = timesteps[None].to(sample.device ) snake_case__ : Any = self.time_proj(lowerCamelCase ) if self.config.use_timestep_embedding: snake_case__ : Tuple = self.time_mlp(lowerCamelCase ) else: snake_case__ : Union[str, Any] = timestep_embed[..., None] snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down snake_case__ : List[Any] = () for downsample_block in self.down_blocks: snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase ) down_block_res_samples += res_samples # 3. mid if self.mid_block: snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): snake_case__ : str = down_block_res_samples[-1:] snake_case__ : int = down_block_res_samples[:-1] snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase ) # 5. post-process if self.out_block: snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase ) if not return_dict: return (sample,) return UNetaDOutput(sample=lowerCamelCase )
694
1
'''simple docstring''' import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def _A ( snake_case__ : List[str] , snake_case__ : str ): snake_case__ : Tuple = XCLIPTextConfig() # derive patch size from model name snake_case__ : Optional[Any] = model_name.find('''patch''' ) snake_case__ : Dict = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) snake_case__ : List[str] = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ ) if "large" in model_name: snake_case__ : Union[str, Any] = 7_68 snake_case__ : List[str] = 30_72 snake_case__ : Any = 12 snake_case__ : Union[str, Any] = 10_24 snake_case__ : Tuple = 40_96 snake_case__ : Tuple = 16 snake_case__ : Dict = 24 snake_case__ : Optional[Any] = 7_68 snake_case__ : Union[str, Any] = 30_72 if model_name == "xclip-large-patch14-16-frames": snake_case__ : Dict = 3_36 snake_case__ : List[Any] = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "large" in model_name: snake_case__ : Any = 7_68 return config def _A ( snake_case__ : Optional[int] ): # text encoder if name == "token_embedding.weight": snake_case__ : List[Any] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": snake_case__ : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: snake_case__ : List[Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: snake_case__ : Any = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: snake_case__ : int = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: snake_case__ : str = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): snake_case__ : Tuple = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: snake_case__ : Optional[Any] = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: snake_case__ : Optional[Any] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": snake_case__ : Dict = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": snake_case__ : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): snake_case__ : Optional[int] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: snake_case__ : int = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: snake_case__ : Union[str, Any] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: snake_case__ : List[str] = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' ) if "visual.proj" in name: snake_case__ : Dict = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: snake_case__ : Optional[Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: snake_case__ : Dict = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: snake_case__ : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": snake_case__ : Optional[Any] = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): snake_case__ : int = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): snake_case__ : str = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def _A ( snake_case__ : int , snake_case__ : List[str] ): for key in orig_state_dict.copy().keys(): snake_case__ : str = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "attn.in_proj" in key: snake_case__ : Union[str, Any] = key.split('''.''' ) if key.startswith('''visual''' ): snake_case__ : Dict = key_split[3] snake_case__ : Dict = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: snake_case__ : Union[str, Any] = val[ :dim, : ] snake_case__ : List[str] = val[ dim : dim * 2, : ] snake_case__ : List[Any] = val[ -dim:, : ] else: snake_case__ : List[str] = val[ :dim ] snake_case__ : str = val[ dim : dim * 2 ] snake_case__ : Optional[int] = val[ -dim: ] else: if "weight" in key: snake_case__ : Dict = val[ :dim, : ] snake_case__ : Tuple = val[ dim : dim * 2, : ] snake_case__ : Optional[Any] = val[ -dim:, : ] else: snake_case__ : str = val[:dim] snake_case__ : Any = val[ dim : dim * 2 ] snake_case__ : Optional[int] = val[-dim:] elif key.startswith('''mit''' ): snake_case__ : Any = key_split[2] snake_case__ : Any = config.vision_config.mit_hidden_size if "weight" in key: snake_case__ : Any = val[:dim, :] snake_case__ : List[Any] = val[dim : dim * 2, :] snake_case__ : str = val[-dim:, :] else: snake_case__ : Optional[Any] = val[:dim] snake_case__ : Tuple = val[dim : dim * 2] snake_case__ : Union[str, Any] = val[-dim:] else: snake_case__ : int = key_split[2] snake_case__ : Dict = config.text_config.hidden_size if "weight" in key: snake_case__ : Dict = val[:dim, :] snake_case__ : Optional[Any] = val[ dim : dim * 2, : ] snake_case__ : Tuple = val[-dim:, :] else: snake_case__ : Tuple = val[:dim] snake_case__ : Optional[int] = val[ dim : dim * 2 ] snake_case__ : str = val[-dim:] else: snake_case__ : List[Any] = rename_key(SCREAMING_SNAKE_CASE_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: snake_case__ : Optional[Any] = val.T snake_case__ : List[str] = val return orig_state_dict def _A ( snake_case__ : Union[str, Any] ): if num_frames == 8: snake_case__ : Tuple = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: snake_case__ : Optional[int] = '''eating_spaghetti.npy''' elif num_frames == 32: snake_case__ : int = '''eating_spaghetti_32_frames.npy''' snake_case__ : Optional[int] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' , ) snake_case__ : str = np.load(SCREAMING_SNAKE_CASE_ ) return list(SCREAMING_SNAKE_CASE_ ) def _A ( snake_case__ : str , snake_case__ : Dict=None , snake_case__ : List[Any]=False ): snake_case__ : int = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } snake_case__ : int = model_to_url[model_name] snake_case__ : Dict = 8 if "16-frames" in model_name: snake_case__ : List[Any] = 16 elif "shot" in model_name: snake_case__ : Optional[int] = 32 snake_case__ : Tuple = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) snake_case__ : List[str] = XCLIPModel(SCREAMING_SNAKE_CASE_ ) model.eval() if "drive" in checkpoint_url: snake_case__ : List[str] = '''pytorch_model.bin''' gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ ) snake_case__ : Dict = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model'''] else: snake_case__ : int = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )['''model'''] snake_case__ : str = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) snake_case__ : Optional[int] = XCLIPModel(SCREAMING_SNAKE_CASE_ ) snake_case__ ,snake_case__ : Tuple = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() snake_case__ : Tuple = 3_36 if model_name == '''xclip-large-patch14-16-frames''' else 2_24 snake_case__ : List[Any] = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ ) snake_case__ : Tuple = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) snake_case__ : int = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) snake_case__ : Optional[Any] = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ ) snake_case__ : int = prepare_video(SCREAMING_SNAKE_CASE_ ) snake_case__ : Dict = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE_ ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): snake_case__ : List[str] = model(**SCREAMING_SNAKE_CASE_ ) # Verify outputs snake_case__ : Optional[int] = outputs.logits_per_video snake_case__ : str = logits_per_video.softmax(dim=1 ) print('''Probs:''' , SCREAMING_SNAKE_CASE_ ) # kinetics-400 if model_name == "xclip-base-patch32": snake_case__ : int = torch.tensor([[0.00_19, 0.99_51, 0.00_30]] ) elif model_name == "xclip-base-patch32-16-frames": snake_case__ : str = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": snake_case__ : Union[str, Any] = torch.tensor([[0.00_83, 0.96_81, 0.02_36]] ) elif model_name == "xclip-base-patch16-16-frames": snake_case__ : List[str] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": snake_case__ : Optional[Any] = torch.tensor([[0.00_62, 0.98_64, 0.00_75]] ) elif model_name == "xclip-large-patch14-16-frames": snake_case__ : List[str] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": snake_case__ : str = torch.tensor([[0.05_55, 0.89_14, 0.05_31]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": snake_case__ : Tuple = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": snake_case__ : Tuple = torch.tensor([[0.00_36, 0.99_20, 0.00_45]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": snake_case__ : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": snake_case__ : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": snake_case__ : List[Any] = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": snake_case__ : List[str] = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": snake_case__ : str = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": snake_case__ : Any = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": snake_case__ : Union[str, Any] = torch.tensor([[0.00_27, 0.99_04, 0.00_70]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": snake_case__ : List[str] = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": snake_case__ : Dict = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(f'''Model name {model_name} not supported''' ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''nielsr''' ) processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''nielsr''' ) slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''nielsr''' ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowerCAmelCase : str = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
700
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!" def _A ( snake_case__ : str , snake_case__ : str ): snake_case__ : Tuple = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 10_24, '''hidden_size''': 7_68, '''max_length''': 5_12, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 10_24, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1E-5, '''token_type_vocab_size''': 2, } snake_case__ : List[str] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py snake_case__ : str = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later snake_case__ : Any = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab snake_case__ : Any = os.path.join(get_home_dir() , '''models''' ) snake_case__ : List[Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ ) snake_case__ : Optional[int] = nlp.model.BERTModel( snake_case__ , len(snake_case__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case__ , use_decoder=snake_case__ , ) original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ ) snake_case__ : Any = original_bort._collect_params_with_prefix() # Build our config 🤗 snake_case__ : Union[str, Any] = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(snake_case__ ), } snake_case__ : Dict = BertConfig.from_dict(snake_case__ ) snake_case__ : Dict = BertForMaskedLM(snake_case__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(snake_case__ : str ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(snake_case__ : List[Any] , snake_case__ : Any ): snake_case__ : Union[str, Any] = hf_param.shape snake_case__ : Any = to_torch(params[gluon_param] ) snake_case__ : Dict = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) snake_case__ : int = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) snake_case__ : str = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): snake_case__ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention snake_case__ : BertSelfAttention = layer.attention.self snake_case__ : Optional[Any] = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) snake_case__ : Dict = check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) snake_case__ : List[str] = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) snake_case__ : int = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output snake_case__ : BertSelfOutput = layer.attention.output snake_case__ : Optional[Any] = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) snake_case__ : List[str] = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) snake_case__ : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) snake_case__ : Any = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate snake_case__ : BertIntermediate = layer.intermediate snake_case__ : int = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) snake_case__ : Optional[int] = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output snake_case__ : BertOutput = layer.output snake_case__ : Any = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) snake_case__ : Tuple = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) snake_case__ : Tuple = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models snake_case__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' ) snake_case__ : str = tokenizer.encode_plus(snake_case__ )['''input_ids'''] # Get gluon output snake_case__ : List[str] = mx.nd.array([input_ids] ) snake_case__ : Optional[int] = original_bort(inputs=snake_case__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(snake_case__ ) snake_case__ : Optional[Any] = BertModel.from_pretrained(snake_case__ ) hf_bort_model.eval() snake_case__ : Optional[Any] = tokenizer.encode_plus(snake_case__ , return_tensors='''pt''' ) snake_case__ : str = hf_bort_model(**snake_case__ )[0] snake_case__ : str = output_gluon[0].asnumpy() snake_case__ : str = output_hf[0].detach().numpy() snake_case__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item() snake_case__ : Optional[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , snake_case__ ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
694
0
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _lowerCAmelCase : Optional[Any] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n" _lowerCAmelCase : List[str] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n" _lowerCAmelCase : Dict = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): """simple docstring""" def lowercase__ ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/ROUGE_(metric)''', '''https://github.com/google-research/google-research/tree/master/rouge''', ] , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=False ) -> Optional[Any]: """simple docstring""" if rouge_types is None: snake_case__ : Optional[int] = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum'''] snake_case__ : Union[str, Any] = rouge_scorer.RougeScorer(rouge_types=lowerCAmelCase__ , use_stemmer=lowerCAmelCase__ ) if use_aggregator: snake_case__ : Tuple = scoring.BootstrapAggregator() else: snake_case__ : List[Any] = [] for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case__ : List[Any] = scorer.score(lowerCAmelCase__ , lowerCAmelCase__ ) if use_aggregator: aggregator.add_scores(lowerCAmelCase__ ) else: scores.append(lowerCAmelCase__ ) if use_aggregator: snake_case__ : Dict = aggregator.aggregate() else: snake_case__ : str = {} for key in scores[0]: snake_case__ : int = [score[key] for score in scores] return result
701
'''simple docstring''' def _A ( snake_case__ : int = 4_00_00_00 ): snake_case__ : int = [] snake_case__ ,snake_case__ : Union[str, Any] = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(snake_case__ ) snake_case__ ,snake_case__ : Any = b, a + b return sum(snake_case__ ) if __name__ == "__main__": print(F'''{solution() = }''')
694
0
'''simple docstring''' from __future__ import annotations _lowerCAmelCase : Dict = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def _A ( snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str , ): snake_case__ : Optional[int] = [ [0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) ) ] # the reference grid snake_case__ : Tuple = 1 snake_case__ : List[str] = [ [0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) ) ] # the action grid snake_case__ : Optional[Any] = init[0] snake_case__ : Dict = init[1] snake_case__ : int = 0 snake_case__ : List[str] = g + heuristic[x][y] # cost from starting cell to destination cell snake_case__ : int = [[f, g, x, y]] snake_case__ : Union[str, Any] = False # flag that is set when search is complete snake_case__ : int = False # flag set if we can't find expand while not found and not resign: if len(_SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''Algorithm is unable to find solution''' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() snake_case__ : List[str] = cell.pop() snake_case__ : Optional[int] = next_cell[2] snake_case__ : str = next_cell[3] snake_case__ : List[str] = next_cell[1] if x == goal[0] and y == goal[1]: snake_case__ : int = True else: for i in range(len(_SCREAMING_SNAKE_CASE ) ): # to try out different valid actions snake_case__ : List[Any] = x + DIRECTIONS[i][0] snake_case__ : List[str] = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(_SCREAMING_SNAKE_CASE ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: snake_case__ : int = g + cost snake_case__ : Any = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) snake_case__ : List[str] = 1 snake_case__ : List[str] = i snake_case__ : Tuple = [] snake_case__ : Optional[int] = goal[0] snake_case__ : List[Any] = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: snake_case__ : Tuple = x - DIRECTIONS[action[x][y]][0] snake_case__ : Optional[Any] = y - DIRECTIONS[action[x][y]][1] snake_case__ : Tuple = xa snake_case__ : List[str] = ya invpath.append([x, y] ) snake_case__ : Any = [] for i in range(len(_SCREAMING_SNAKE_CASE ) ): path.append(invpath[len(_SCREAMING_SNAKE_CASE ) - 1 - i] ) return path, action if __name__ == "__main__": _lowerCAmelCase : List[str] = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] _lowerCAmelCase : List[str] = [0, 0] # all coordinates are given in format [y,x] _lowerCAmelCase : Any = [len(grid) - 1, len(grid[0]) - 1] _lowerCAmelCase : Optional[Any] = 1 # the cost map which pushes the path closer to the goal _lowerCAmelCase : Dict = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): _lowerCAmelCase : Tuple = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map _lowerCAmelCase : Optional[int] = 9_9 _lowerCAmelCase : Optional[Any] = search(grid, init, goal, cost, heuristic) print("ACTION MAP") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
702
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: _lowerCAmelCase : Any = None _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = "▁" _lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : int = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}, "tokenizer_file": { "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json" }, } _lowerCAmelCase : Optional[int] = { "google/pegasus-xsum": 5_1_2, } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = PegasusTokenizer _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]: """simple docstring""" snake_case__ : Tuple = offset if additional_special_tokens is not None: if not isinstance(lowerCamelCase , lowerCamelCase ): raise TypeError( f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is''' f''' {type(lowerCamelCase )}''' ) snake_case__ : List[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 ) ] if len(set(lowerCamelCase ) ) != len(lowerCamelCase ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' ) snake_case__ : List[Any] = additional_special_tokens_extended else: snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Union[str, Any] = vocab_file snake_case__ : List[Any] = False if not self.vocab_file else True def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( '''There should be 3 special tokens: mask_token, pad_token, and eos_token +''' f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(lowerCamelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCamelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : int = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ): copyfile(self.vocab_file , lowerCamelCase ) return (out_vocab_file,)
694
0
'''simple docstring''' import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel _lowerCAmelCase : Optional[int] = False _lowerCAmelCase : Optional[int] = True _lowerCAmelCase : Dict = False if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--repo_path", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") _lowerCAmelCase : Dict = parser.parse_args() _lowerCAmelCase : Tuple = { "image_size": "sample_size", "num_res_blocks": "layers_per_block", "block_channels": "block_out_channels", "down_blocks": "down_block_types", "up_blocks": "up_block_types", "downscale_freq_shift": "freq_shift", "resnet_num_groups": "norm_num_groups", "resnet_act_fn": "act_fn", "resnet_eps": "norm_eps", "num_head_channels": "attention_head_dim", } _lowerCAmelCase : List[Any] = { "time_steps": "time_proj", "mid": "mid_block", "downsample_blocks": "down_blocks", "upsample_blocks": "up_blocks", } _lowerCAmelCase : int = "" if has_file(args.repo_path, "config.json") else "unet" with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader: _lowerCAmelCase : int = reader.read() _lowerCAmelCase : int = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, "config.json"): _lowerCAmelCase : Tuple = UNetaDModel(**config) else: _lowerCAmelCase : int = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel _lowerCAmelCase : str = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) _lowerCAmelCase : int = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: _lowerCAmelCase : str = config[key] del config[key] _lowerCAmelCase : Optional[Any] = [k.replace("UNetRes", "") for k in config["down_block_types"]] _lowerCAmelCase : Optional[int] = [k.replace("UNetRes", "") for k in config["up_block_types"]] if do_only_weights: _lowerCAmelCase : Dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin")) _lowerCAmelCase : str = {} for param_key, param_value in state_dict.items(): if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"): continue _lowerCAmelCase : List[str] = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(".")[0] == key: _lowerCAmelCase : Optional[int] = param_value _lowerCAmelCase : List[Any] = True if not has_changed: _lowerCAmelCase : str = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
703
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple: """simple docstring""" snake_case__ : Optional[Any] = 1.0 if scale is None else scale snake_case__ : Dict = 0.0 if loc is None else loc super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] ) @property def lowercase__ ( self ) -> Dict: """simple docstring""" return self.base_dist.mean * self.scale + self.loc @property def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" return self.base_dist.variance * self.scale**2 @property def lowercase__ ( self ) -> List[str]: """simple docstring""" return self.variance.sqrt() class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None: """simple docstring""" super().__init__(**lowerCamelCase ) snake_case__ : Tuple = args_dim snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] ) snake_case__ : Optional[int] = domain_map def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]: """simple docstring""" snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj] return self.domain_map(*lowerCamelCase ) class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Tuple = function def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]: """simple docstring""" return self.function(lowerCamelCase , *lowerCamelCase ) class snake_case : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 def __init__( self , lowerCamelCase = 1 ) -> None: """simple docstring""" snake_case__ : Optional[Any] = dim snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim} def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" if self.dim == 1: return self.distribution_class(*lowerCamelCase ) else: return Independent(self.distribution_class(*lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution: """simple docstring""" snake_case__ : List[Any] = self._base_distribution(lowerCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim ) @property def lowercase__ ( self ) -> Tuple: """simple docstring""" return () if self.dim == 1 else (self.dim,) @property def lowercase__ ( self ) -> int: """simple docstring""" return len(self.event_shape ) @property def lowercase__ ( self ) -> float: """simple docstring""" return 0.0 def lowercase__ ( self , lowerCamelCase ) -> nn.Module: """simple docstring""" return ParameterProjection( in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowercase__ ( self , *lowerCamelCase ) -> Any: """simple docstring""" raise NotImplementedError() @staticmethod def lowercase__ ( lowerCamelCase ) -> torch.Tensor: """simple docstring""" return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0 class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1} _lowerCAmelCase = StudentT @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"loc": 1, "scale": 1} _lowerCAmelCase = Normal @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"total_count": 1, "logits": 1} _lowerCAmelCase = NegativeBinomial @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowercase__ ( self , lowerCamelCase ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : str = distr_args if self.dim == 1: return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) else: return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : Optional[Any] = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
0
'''simple docstring''' import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class snake_case ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : List[str] = torch.nn.Linear(10 , 10 ) snake_case__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) snake_case__ : str = Accelerator() snake_case__ : str = accelerator.prepare(UpperCamelCase_ ) try: pickle.loads(pickle.dumps(UpperCamelCase_ ) ) except Exception as e: self.fail(f'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
704
'''simple docstring''' from math import factorial def _A ( snake_case__ : int = 20 ): snake_case__ : int = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... snake_case__ : Union[str, Any] = n // 2 return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(2_0)) else: try: _lowerCAmelCase : Any = int(sys.argv[1]) print(solution(n)) except ValueError: print("Invalid entry - please enter a number.")
694
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Tuple = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json", } class snake_case ( lowercase__ ): """simple docstring""" _lowerCAmelCase = 'data2vec-text' def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> Any: """simple docstring""" super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) snake_case__ : Optional[Any] = vocab_size snake_case__ : List[str] = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : Union[str, Any] = num_attention_heads snake_case__ : Tuple = hidden_act snake_case__ : Union[str, Any] = intermediate_size snake_case__ : Union[str, Any] = hidden_dropout_prob snake_case__ : Tuple = attention_probs_dropout_prob snake_case__ : str = max_position_embeddings snake_case__ : Optional[Any] = type_vocab_size snake_case__ : Optional[Any] = initializer_range snake_case__ : str = layer_norm_eps snake_case__ : Union[str, Any] = position_embedding_type snake_case__ : List[Any] = use_cache snake_case__ : int = classifier_dropout class snake_case ( lowercase__ ): """simple docstring""" @property def lowercase__ ( self ) -> Any: """simple docstring""" if self.task == "multiple-choice": snake_case__ : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"} else: snake_case__ : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
705
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = (EulerDiscreteScheduler,) _lowerCAmelCase = 1_0 def lowercase__ ( self , **lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ : Any = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowerCamelCase ) return config def lowercase__ ( self ) -> List[Any]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCamelCase ) def lowercase__ ( self ) -> str: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Any = self.get_scheduler_config() snake_case__ : int = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ : Dict = torch.manual_seed(0 ) snake_case__ : Any = self.dummy_model() snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ : List[Any] = sample.to(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : int = model(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Optional[int] = output.prev_sample snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : Tuple = self.scheduler_classes[0] snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) snake_case__ : int = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ : Optional[Any] = torch.manual_seed(0 ) snake_case__ : Optional[int] = self.dummy_model() snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ : Optional[int] = sample.to(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Union[str, Any] = output.prev_sample snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 0.0_002 ) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3 def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Optional[int] = self.get_scheduler_config() snake_case__ : List[str] = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase ) snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Optional[int] = self.dummy_model() snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ : Tuple = sample.to(lowerCamelCase ) for t in scheduler.timesteps: snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : str = model(lowerCamelCase , lowerCamelCase ) snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : int = output.prev_sample snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : Dict = self.scheduler_classes[0] snake_case__ : str = self.get_scheduler_config() snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase ) snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Dict = self.dummy_model() snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ : Optional[Any] = sample.to(lowerCamelCase ) for t in scheduler.timesteps: snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Optional[int] = output.prev_sample snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
694
0
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _A ( snake_case__ : Dict ): snake_case__ : int = 3_84 if "tiny" in model_name: snake_case__ : List[str] = [3, 3, 9, 3] snake_case__ : int = [96, 1_92, 3_84, 7_68] if "small" in model_name: snake_case__ : Tuple = [3, 3, 27, 3] snake_case__ : List[Any] = [96, 1_92, 3_84, 7_68] if "base" in model_name: snake_case__ : List[str] = [3, 3, 27, 3] snake_case__ : Any = [1_28, 2_56, 5_12, 10_24] snake_case__ : Optional[int] = 5_12 if "large" in model_name: snake_case__ : str = [3, 3, 27, 3] snake_case__ : str = [1_92, 3_84, 7_68, 15_36] snake_case__ : str = 7_68 if "xlarge" in model_name: snake_case__ : Optional[int] = [3, 3, 27, 3] snake_case__ : Optional[int] = [2_56, 5_12, 10_24, 20_48] snake_case__ : List[str] = 10_24 # set label information snake_case__ : Optional[Any] = 1_50 snake_case__ : Union[str, Any] = 'huggingface/label-files' snake_case__ : Tuple = 'ade20k-id2label.json' snake_case__ : List[Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='''dataset''' ) , '''r''' ) ) snake_case__ : Union[str, Any] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} snake_case__ : int = {v: k for k, v in idalabel.items()} snake_case__ : int = ConvNextConfig( depths=lowerCamelCase_ , hidden_sizes=lowerCamelCase_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) snake_case__ : List[Any] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def _A ( snake_case__ : int ): snake_case__ : str = [] # fmt: off # stem rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') ) rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def _A ( snake_case__ : Dict , snake_case__ : int , snake_case__ : str ): snake_case__ : Tuple = dct.pop(lowerCamelCase_ ) snake_case__ : Optional[Any] = val def _A ( snake_case__ : str , snake_case__ : List[str] , snake_case__ : Tuple ): snake_case__ : Optional[int] = { 'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth', 'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth', 'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth', 'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth', 'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth', } snake_case__ : Optional[int] = model_name_to_url[model_name] snake_case__ : List[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='''cpu''' )['state_dict'] snake_case__ : Tuple = get_upernet_config(lowerCamelCase_ ) snake_case__ : List[Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): snake_case__ : Optional[Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: snake_case__ : Tuple = key.replace('''bn''' , '''batch_norm''' ) snake_case__ : List[Any] = val # rename keys snake_case__ : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) model.load_state_dict(lowerCamelCase_ ) # verify on image snake_case__ : Union[str, Any] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' snake_case__ : Dict = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' ) snake_case__ : List[Any] = SegformerImageProcessor() snake_case__ : Tuple = processor(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values with torch.no_grad(): snake_case__ : Union[str, Any] = model(lowerCamelCase_ ) if model_name == "upernet-convnext-tiny": snake_case__ : List[Any] = torch.tensor( [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ) elif model_name == "upernet-convnext-small": snake_case__ : str = torch.tensor( [[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] ) elif model_name == "upernet-convnext-base": snake_case__ : Optional[int] = torch.tensor( [[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] ) elif model_name == "upernet-convnext-large": snake_case__ : Union[str, Any] = torch.tensor( [[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] ) elif model_name == "upernet-convnext-xlarge": snake_case__ : Optional[int] = torch.tensor( [[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-convnext-tiny", type=str, choices=[F'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]], help="Name of the ConvNext UperNet model you\'d like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowerCAmelCase : Optional[Any] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
706
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = ['pixel_values'] def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : int = do_resize snake_case__ : Dict = do_rescale snake_case__ : Any = size_divisor snake_case__ : str = resample super().__init__(**lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase ) # Rounds the height and width down to the closest multiple of size_divisor snake_case__ : Any = height // size_divisor * size_divisor snake_case__ : Union[str, Any] = width // size_divisor * size_divisor snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) return image def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature: """simple docstring""" snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor snake_case__ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase ) if not valid_images(lowerCamelCase ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images] if do_resize: snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images] if do_rescale: snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images] snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images] snake_case__ : str = {'''pixel_values''': images} return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
694
0
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } _lowerCAmelCase = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } _lowerCAmelCase = {"facebook/blenderbot_small-90M": 5_1_2} def _A ( snake_case__ : str ): snake_case__ : Tuple = set() snake_case__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case__ : Union[str, Any] = char snake_case__ : List[Any] = set(snake_case__ ) return pairs class snake_case ( UpperCAmelCase_ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="__start__" , lowerCamelCase="__end__" , lowerCamelCase="__unk__" , lowerCamelCase="__null__" , **lowerCamelCase , ) -> Any: """simple docstring""" super().__init__(unk_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , pad_token=_lowercase , **_lowercase ) with open(_lowercase , encoding='''utf-8''' ) as vocab_handle: snake_case__ : int = json.load(_lowercase ) snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(_lowercase , encoding='''utf-8''' ) as merges_handle: snake_case__ : int = merges_handle.read().split('''\n''' )[1:-1] snake_case__ : Any = [tuple(merge.split() ) for merge in merges] snake_case__ : Optional[int] = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) snake_case__ : Union[str, Any] = {} @property def lowercase__ ( self ) -> int: """simple docstring""" return len(self.encoder ) def lowercase__ ( self ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowercase__ ( self , lowerCamelCase ) -> str: """simple docstring""" if token in self.cache: return self.cache[token] snake_case__ : Tuple = re.sub('''([.,!?()])''' , R''' \1''' , _lowercase ) snake_case__ : Union[str, Any] = re.sub('''(\')''' , R''' \1 ''' , _lowercase ) snake_case__ : Dict = re.sub(R'''\s{2,}''' , ''' ''' , _lowercase ) if "\n" in token: snake_case__ : str = token.replace('''\n''' , ''' __newln__''' ) snake_case__ : Tuple = token.split(''' ''' ) snake_case__ : int = [] for token in tokens: if not len(_lowercase ): continue snake_case__ : Dict = token.lower() snake_case__ : Optional[Any] = tuple(_lowercase ) snake_case__ : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) snake_case__ : int = get_pairs(_lowercase ) if not pairs: words.append(_lowercase ) continue while True: snake_case__ : List[Any] = min(_lowercase , key=lambda lowerCamelCase : self.bpe_ranks.get(_lowercase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break snake_case__ : Any = bigram snake_case__ : int = [] snake_case__ : Dict = 0 while i < len(_lowercase ): try: snake_case__ : Optional[int] = word.index(_lowercase , _lowercase ) new_word.extend(word[i:j] ) snake_case__ : List[str] = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case__ : Tuple = tuple(_lowercase ) snake_case__ : Union[str, Any] = new_word if len(_lowercase ) == 1: break else: snake_case__ : List[Any] = get_pairs(_lowercase ) snake_case__ : Dict = '@@ '.join(_lowercase ) snake_case__ : int = word[:-4] snake_case__ : Tuple = word words.append(_lowercase ) return " ".join(_lowercase ) def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : str = [] snake_case__ : Union[str, Any] = re.findall(R'''\S+\n?''' , _lowercase ) for token in words: split_tokens.extend(list(self.bpe(_lowercase ).split(''' ''' ) ) ) return split_tokens def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : str = token.lower() return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) ) def lowercase__ ( self , lowerCamelCase ) -> str: """simple docstring""" return self.decoder.get(_lowercase , self.unk_token ) def lowercase__ ( self , lowerCamelCase ) -> str: """simple docstring""" snake_case__ : str = ' '.join(_lowercase ).replace('''@@ ''' , '''''' ).strip() return out_string def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_lowercase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : int = os.path.join( _lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case__ : List[str] = os.path.join( _lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + '''\n''' ) snake_case__ : Union[str, Any] = 0 with open(_lowercase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) snake_case__ : Optional[int] = token_index writer.write(''' '''.join(_lowercase ) + '''\n''' ) index += 1 return vocab_file, merge_file
707
'''simple docstring''' from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] ) @pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] ) @pytest.mark.parametrize('''revision''' , [None, '''v2'''] ) def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ): snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ ) assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
694
0
def _A ( snake_case__ : str , snake_case__ : str ): if len(__A ) != len(__A ): raise ValueError('''String lengths must match!''' ) snake_case__ : List[Any] = 0 for chara, chara in zip(__A , __A ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
708
'''simple docstring''' from __future__ import annotations from collections import namedtuple def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
694
0
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def _A ( snake_case__ : Union[str, Any] ): snake_case__ : Union[str, Any] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def _A ( snake_case__ : Tuple ): snake_case__ ,snake_case__ : Optional[Any] = emb.weight.shape snake_case__ : str = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) snake_case__ : Tuple = emb.weight.data return lin_layer def _A ( snake_case__ : Dict ): snake_case__ : Tuple = torch.load(__lowerCAmelCase , map_location='''cpu''' ) snake_case__ : List[str] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] snake_case__ : Optional[int] = mam_aaa['''model'''] remove_ignore_keys_(__lowerCAmelCase ) snake_case__ : List[str] = state_dict['''encoder.embed_tokens.weight'''].shape[0] snake_case__ : Any = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , ) snake_case__ : str = state_dict['''decoder.embed_tokens.weight'''] snake_case__ : int = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) snake_case__ : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") _lowerCAmelCase : Tuple = parser.parse_args() _lowerCAmelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß) model.save_pretrained(args.pytorch_dump_folder_path)
709
'''simple docstring''' import os import pytest from transformers.dynamic_module_utils import get_imports _lowerCAmelCase : Union[str, Any] = "\nimport os\n" _lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n" _lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n" _lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n" _lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n" _lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n" _lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n" _lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n" _lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n" _lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n" _lowerCAmelCase : Tuple = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , snake_case__ ) def _A ( snake_case__ : List[str] , snake_case__ : Dict ): snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' ) with open(snake_case__ , '''w''' ) as _tmp_file: _tmp_file.write(snake_case__ ) snake_case__ : int = get_imports(snake_case__ ) assert parsed_imports == ["os"]
694
0
'''simple docstring''' import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument("--user", type=str, default="ubuntu") parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--key_path", type=str, default=None) parser.add_argument("--instance", type=str, default="V100:1") parser.add_argument("--provider", type=str, default="cheapest") parser.add_argument("--use_spot", type=bool, default=False) parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py") _lowerCAmelCase : List[Any] = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("Cannot specify both BYO and on-demand cluster args") _lowerCAmelCase : List[str] = rh.cluster( name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path} ) else: _lowerCAmelCase : Optional[Any] = rh.cluster( name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) _lowerCAmelCase : List[str] = args.example.rsplit("/", 1)[0] # Set up remote environment cluster.install_packages(["pip:./"]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
710
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Any = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'markuplm' def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str: """simple docstring""" super().__init__( pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Optional[int] = vocab_size snake_case__ : Tuple = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : List[str] = num_attention_heads snake_case__ : List[Any] = hidden_act snake_case__ : Dict = intermediate_size snake_case__ : List[str] = hidden_dropout_prob snake_case__ : Optional[int] = attention_probs_dropout_prob snake_case__ : str = max_position_embeddings snake_case__ : str = type_vocab_size snake_case__ : List[str] = initializer_range snake_case__ : List[str] = layer_norm_eps snake_case__ : Optional[Any] = position_embedding_type snake_case__ : Dict = use_cache snake_case__ : int = classifier_dropout # additional properties snake_case__ : Union[str, Any] = max_depth snake_case__ : Dict = max_xpath_tag_unit_embeddings snake_case__ : Any = max_xpath_subs_unit_embeddings snake_case__ : int = tag_pad_id snake_case__ : Tuple = subs_pad_id snake_case__ : Dict = xpath_unit_hidden_size
694
0
'''simple docstring''' import math _lowerCAmelCase : Any = 1_0 _lowerCAmelCase : List[str] = 7 _lowerCAmelCase : int = BALLS_PER_COLOUR * NUM_COLOURS def _A ( snake_case__ : int = 20 ): snake_case__ : Optional[int] = math.comb(_lowerCAmelCase , _lowerCAmelCase ) snake_case__ : Union[str, Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _lowerCAmelCase ) snake_case__ : Any = NUM_COLOURS * (1 - missing_colour / total) return f'''{result:.9f}''' if __name__ == "__main__": print(solution(2_0))
711
'''simple docstring''' def _A ( snake_case__ : float ): return 10 - x * x def _A ( snake_case__ : float , snake_case__ : float ): # Bolzano theory in order to find if there is a root between a and b if equation(snake_case__ ) * equation(snake_case__ ) >= 0: raise ValueError('''Wrong space!''' ) snake_case__ : List[str] = a while (b - a) >= 0.01: # Find middle point snake_case__ : Optional[int] = (a + b) / 2 # Check if middle point is root if equation(snake_case__ ) == 0.0: break # Decide the side to repeat the steps if equation(snake_case__ ) * equation(snake_case__ ) < 0: snake_case__ : Dict = c else: snake_case__ : List[str] = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
694
0
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class snake_case ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : str = """ylacombe/bark-small""" snake_case__ : List[Any] = tempfile.mkdtemp() snake_case__ : Dict = """en_speaker_1""" snake_case__ : Optional[int] = """This is a test string""" snake_case__ : str = """speaker_embeddings_path.json""" snake_case__ : List[str] = """speaker_embeddings""" def lowercase__ ( self , **lowerCamelCase ) -> List[str]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **_a ) def lowercase__ ( self ) -> int: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : str = self.get_tokenizer() snake_case__ : Optional[int] = BarkProcessor(tokenizer=_a ) processor.save_pretrained(self.tmpdirname ) snake_case__ : Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : List[str] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case__ : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) snake_case__ : str = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : Optional[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case__ : Optional[Any] = 35 snake_case__ : Any = 2 snake_case__ : Tuple = 8 snake_case__ : Optional[int] = { """semantic_prompt""": np.ones(_a ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case__ : Tuple = processor(text=self.input_string , voice_preset=_a ) snake_case__ : Any = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_a , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case__ : str = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(_a , **_a ) snake_case__ : List[Any] = processor(text=self.input_string , voice_preset=_a ) snake_case__ : Tuple = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_a , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case__ : List[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : List[str] = self.get_tokenizer() snake_case__ : List[Any] = BarkProcessor(tokenizer=_a ) snake_case__ : Union[str, Any] = processor(text=self.input_string ) snake_case__ : Dict = tokenizer( self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
712
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : list[float] , snake_case__ : list[float] ): snake_case__ : Dict = sorted(numsa + numsa ) snake_case__ ,snake_case__ : Tuple = divmod(len(snake_case__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()] _lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()] print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
694
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Any: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[str]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Dict: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[str]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> str: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[str]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[str]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Tuple: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> int: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> int: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Any: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Any: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Tuple: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[str]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class snake_case ( metaclass=_snake_case ): """simple docstring""" _lowerCAmelCase = ['sentencepiece'] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''sentencepiece'''] )
713
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = [ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys _lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
694
0
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def _A ( snake_case__ : str ): snake_case__ : Tuple = [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def _A ( snake_case__ : int , snake_case__ : Optional[int] ): snake_case__ : Optional[Any] = [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def _A ( snake_case__ : str ): snake_case__ : List[str] = [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') ) return token def _A ( ): snake_case__ : Tuple = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def _A ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Dict ): snake_case__ : List[str] = '''imagenet-1k-id2label.json''' snake_case__ : List[str] = 10_00 snake_case__ : Union[str, Any] = '''huggingface/label-files''' snake_case__ : Optional[Any] = num_labels snake_case__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) ) , '''r''' ) ) snake_case__ : Any = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} snake_case__ : Optional[int] = idalabel snake_case__ : int = {v: k for k, v in idalabel.items()} snake_case__ : Optional[int] = CvtConfig(num_labels=lowerCAmelCase__ , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": snake_case__ : int = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": snake_case__ : Union[str, Any] = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: snake_case__ : Optional[int] = [2, 2, 20] snake_case__ : List[str] = [3, 12, 16] snake_case__ : Any = [1_92, 7_68, 10_24] snake_case__ : Optional[int] = CvtForImageClassification(lowerCAmelCase__ ) snake_case__ : Tuple = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) snake_case__ : Tuple = image_size snake_case__ : List[str] = torch.load(lowerCAmelCase__ , map_location=torch.device('''cpu''' ) ) snake_case__ : str = OrderedDict() snake_case__ : Any = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: snake_case__ : Optional[int] = list_of_state_dict + cls_token(lowerCAmelCase__ ) snake_case__ : Tuple = list_of_state_dict + embeddings(lowerCAmelCase__ ) for cnt in range(config.depth[idx] ): snake_case__ : Tuple = list_of_state_dict + attention(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case__ : Optional[Any] = list_of_state_dict + final() for gg in list_of_state_dict: print(lowerCAmelCase__ ) for i in range(len(lowerCAmelCase__ ) ): snake_case__ : Any = original_weights[list_of_state_dict[i][1]] model.load_state_dict(lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) image_processor.save_pretrained(lowerCAmelCase__ ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--cvt_model", default="cvt-w24", type=str, help="Name of the cvt model you'd like to convert.", ) parser.add_argument( "--image_size", default=3_8_4, type=int, help="Input Image Size", ) parser.add_argument( "--cvt_file_name", default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth", type=str, help="Input Image Size", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
714
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'encoder-decoder' _lowerCAmelCase = True def __init__( self , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" super().__init__(**lowerCamelCase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" snake_case__ : List[str] = kwargs.pop('''encoder''' ) snake_case__ : Any = encoder_config.pop('''model_type''' ) snake_case__ : List[str] = kwargs.pop('''decoder''' ) snake_case__ : str = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase ) snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase ) snake_case__ : str = True @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig: """simple docstring""" logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) snake_case__ : Optional[int] = True snake_case__ : str = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : List[Any] = copy.deepcopy(self.__dict__ ) snake_case__ : List[Any] = self.encoder.to_dict() snake_case__ : str = self.decoder.to_dict() snake_case__ : Any = self.__class__.model_type return output
694
0
'''simple docstring''' from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class snake_case ( __lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = ['pixel_values'] def __init__( self , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = 8 , **lowerCamelCase , ) -> None: """simple docstring""" super().__init__(**_UpperCamelCase ) snake_case__ : Tuple = do_rescale snake_case__ : str = rescale_factor snake_case__ : Any = do_pad snake_case__ : Optional[int] = pad_size def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None ) -> Union[str, Any]: """simple docstring""" snake_case__ : Any = get_image_size(_UpperCamelCase ) snake_case__ : str = (old_height // size + 1) * size - old_height snake_case__ : List[Any] = (old_width // size + 1) * size - old_width return pad(_UpperCamelCase , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=_UpperCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> Optional[int]: """simple docstring""" snake_case__ : int = do_rescale if do_rescale is not None else self.do_rescale snake_case__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case__ : List[Any] = do_pad if do_pad is not None else self.do_pad snake_case__ : List[str] = pad_size if pad_size is not None else self.pad_size snake_case__ : str = make_list_of_images(_UpperCamelCase ) if not valid_images(_UpperCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. snake_case__ : Optional[int] = [to_numpy_array(_UpperCamelCase ) for image in images] if do_rescale: snake_case__ : Union[str, Any] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images] if do_pad: snake_case__ : Union[str, Any] = [self.pad(_UpperCamelCase , size=_UpperCamelCase ) for image in images] snake_case__ : Optional[Any] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images] snake_case__ : Union[str, Any] = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
715
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = "▁" _lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} _lowerCAmelCase : Dict = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } _lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4} class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , ) snake_case__ : int = vocab_file snake_case__ : Optional[Any] = monolingual_vocab_file snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility snake_case__ : Dict = {} snake_case__ : Union[str, Any] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids: snake_case__ : List[str] = cnt cnt += 1 with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): snake_case__ : Optional[int] = line.strip().split()[0] snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids ) if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids: snake_case__ : Any = len(self.fairseq_tokens_to_ids ) snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: """simple docstring""" snake_case__ : int = self.__dict__.copy() snake_case__ : Any = None snake_case__ : int = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case__ : Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case__ : Dict = {} snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : str = [self.cls_token_id] snake_case__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]: """simple docstring""" snake_case__ : List[str] = [self.sep_token_id] snake_case__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self ) -> Optional[int]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase ) def lowercase__ ( self , lowerCamelCase ) -> Optional[int]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowercase__ ( self , lowerCamelCase ) -> str: """simple docstring""" return self.fairseq_ids_to_tokens[index] def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip() return out_string def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : Optional[int] = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case__ : Optional[int] = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase , '''wb''' ) as fi: snake_case__ : Dict = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , lowerCamelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(lowerCamelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
694
0
'''simple docstring''' _lowerCAmelCase : str = {str(digit): digit**5 for digit in range(1_0)} def _A ( snake_case__ : Optional[int] ): return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_lowerCamelCase ) ) def _A ( ): return sum( number for number in range(10_00 , 1_00_00_00 ) if number == digits_fifth_powers_sum(_lowerCamelCase ) ) if __name__ == "__main__": print(solution())
716
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline _lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") _lowerCAmelCase : Optional[int] = parser.parse_args() _lowerCAmelCase : Union[str, Any] = "cpu" _lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" _lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model" _lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: _lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) _lowerCAmelCase : Optional[Any] = pipe.to(device) # to channels last _lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last) _lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last) _lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: _lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex _lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4) _lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9 _lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8) _lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status) try: _lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: _lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) _lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) _lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: _lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute _lowerCAmelCase : Tuple = 6_6_6 _lowerCAmelCase : str = torch.Generator(device).manual_seed(seed) _lowerCAmelCase : Dict = {"generator": generator} if args.steps is not None: _lowerCAmelCase : Tuple = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): _lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
694
0
'''simple docstring''' import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def lowercase__ ( self ) -> List[Any]: """simple docstring""" snake_case__ : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : Union[str, Any] = None ops.enable_eager_execution_internal() snake_case__ : Tuple = tf.config.list_physical_devices('''CPU''' ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) snake_case__ : Dict = tf.config.list_logical_devices(device_type='''CPU''' ) snake_case__ : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): snake_case__ : Optional[int] = GradientAccumulator() snake_case__ : Tuple = tf.Variable([4.0, 3.0] ) snake_case__ : List[Any] = create_optimizer(5E-5 , 10 , 5 ) snake_case__ : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(lowerCamelCase ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(lowerCamelCase , lowerCamelCase ): with strategy.scope(): snake_case__ : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(lowerCamelCase , lowerCamelCase ): snake_case__ : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
717
'''simple docstring''' import socket def _A ( ): snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) snake_case__ : str = socket.gethostname() snake_case__ : Union[str, Any] = 1_23_12 sock.connect((host, port) ) sock.send(B'''Hello server!''' ) with open('''Received_file''' , '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: snake_case__ : int = sock.recv(10_24 ) if not data: break out_file.write(snake_case__ ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
694
0
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case : """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=None , lowerCamelCase=2 , ) -> Optional[int]: """simple docstring""" snake_case__ : Any = parent snake_case__ : Optional[Any] = batch_size snake_case__ : List[str] = image_size snake_case__ : int = patch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : Tuple = is_training snake_case__ : str = use_labels snake_case__ : List[str] = hidden_size snake_case__ : Union[str, Any] = num_hidden_layers snake_case__ : Optional[Any] = num_attention_heads snake_case__ : Any = intermediate_size snake_case__ : Union[str, Any] = hidden_act snake_case__ : Tuple = hidden_dropout_prob snake_case__ : List[Any] = attention_probs_dropout_prob snake_case__ : str = type_sequence_label_size snake_case__ : List[Any] = initializer_range snake_case__ : List[Any] = scope snake_case__ : Any = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) snake_case__ : int = (image_size // patch_size) ** 2 snake_case__ : Optional[Any] = num_patches + 2 def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : List[Any] = None if self.use_labels: snake_case__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ : Any = DeiTModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() snake_case__ : Optional[Any] = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ : str = DeiTForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() snake_case__ : Union[str, Any] = model(lowerCamelCase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case__ : str = 1 snake_case__ : List[Any] = DeiTForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() snake_case__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ : Optional[Any] = model(lowerCamelCase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]: """simple docstring""" snake_case__ : Optional[Any] = self.type_sequence_label_size snake_case__ : Union[str, Any] = DeiTForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() snake_case__ : int = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case__ : List[Any] = 1 snake_case__ : Tuple = DeiTForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() snake_case__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ : Optional[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Optional[int] = self.prepare_config_and_inputs() ( snake_case__ ) : List[str] = config_and_inputs snake_case__ : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class snake_case ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _lowerCAmelCase = ( { 'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ : Dict = DeiTModelTester(self ) snake_case__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def lowercase__ ( self ) -> List[str]: """simple docstring""" pass def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Dict = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case__ : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) ) def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Tuple = model_class(lowerCamelCase_ ) snake_case__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : List[Any] = [*signature.parameters.keys()] snake_case__ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ) -> int: """simple docstring""" snake_case__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowercase__ ( self ) -> Dict: """simple docstring""" if not self.model_tester.is_training: return snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : str = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowerCamelCase_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue snake_case__ : int = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.train() snake_case__ : Tuple = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) snake_case__ : Any = model(**lowerCamelCase_ ).loss loss.backward() def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return snake_case__ : Union[str, Any] = False snake_case__ : Optional[Any] = True for model_class in self.all_model_classes: if model_class in get_values(lowerCamelCase_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue snake_case__ : Dict = model_class(lowerCamelCase_ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase_ ) model.train() snake_case__ : Optional[int] = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) snake_case__ : Tuple = model(**lowerCamelCase_ ).loss loss.backward() def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : Tuple = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowerCamelCase_ ), *get_values(lowerCamelCase_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}''' ): snake_case__ : str = problem_type['''title'''] snake_case__ : Dict = problem_type['''num_labels'''] snake_case__ : List[Any] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.train() snake_case__ : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) if problem_type["num_labels"] > 1: snake_case__ : Any = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) snake_case__ : Union[str, Any] = inputs['''labels'''].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowerCamelCase_ ) as warning_list: snake_case__ : List[str] = model(**lowerCamelCase_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def lowercase__ ( self ) -> Tuple: """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : List[str] = DeiTModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def _A ( ): snake_case__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase__ ( self ) -> Optional[int]: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to( lowerCamelCase_ ) snake_case__ : Optional[Any] = self.default_image_processor snake_case__ : List[str] = prepare_img() snake_case__ : str = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): snake_case__ : Tuple = model(**lowerCamelCase_ ) # verify the logits snake_case__ : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) snake_case__ : Tuple = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : Optional[Any] = DeiTModel.from_pretrained( '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' ) snake_case__ : Optional[int] = self.default_image_processor snake_case__ : List[str] = prepare_img() snake_case__ : List[Any] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ) snake_case__ : Tuple = inputs.pixel_values.to(lowerCamelCase_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): snake_case__ : List[Any] = model(lowerCamelCase_ )
718
'''simple docstring''' from __future__ import annotations def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 ) if __name__ == "__main__": import doctest doctest.testmod()
694
0
'''simple docstring''' def _A ( snake_case__ : Optional[int] ): snake_case__ : str = 1 for i in range(1 , num + 1 ): fact *= i return fact def _A ( snake_case__ : Any ): snake_case__ : List[Any] = 0 while number > 0: snake_case__ : Tuple = number % 10 sum_of_digits += last_digit snake_case__ : str = number // 10 # Removing the last_digit from the given number return sum_of_digits def _A ( snake_case__ : Union[str, Any] = 1_00 ): snake_case__ : Optional[Any] = factorial(_A ) snake_case__ : Optional[int] = split_and_add(_A ) return result if __name__ == "__main__": print(solution(int(input("Enter the Number: ").strip())))
719
'''simple docstring''' from math import isqrt def _A ( snake_case__ : int ): return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) ) def _A ( snake_case__ : int = 10**6 ): snake_case__ : str = 0 snake_case__ : List[str] = 1 snake_case__ : str = 7 while prime_candidate < max_prime: primes_count += is_prime(snake_case__ ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
694
0
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger() def _A ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = True ): print(f'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": snake_case__ : Dict = timm.create_model('''levit_128s''' , pretrained=snake_case__ ) else: snake_case__ : str = timm.create_model('''levit_128''' , pretrained=snake_case__ ) if hidden_sizes == 1_92: snake_case__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=snake_case__ ) if hidden_sizes == 2_56: snake_case__ : int = timm.create_model('''levit_256''' , pretrained=snake_case__ ) if hidden_sizes == 3_84: snake_case__ : Any = timm.create_model('''levit_384''' , pretrained=snake_case__ ) from_model.eval() snake_case__ : Union[str, Any] = LevitForImageClassificationWithTeacher(snake_case__ ).eval() snake_case__ : int = OrderedDict() snake_case__ : Tuple = from_model.state_dict() snake_case__ : int = list(from_model.state_dict().keys() ) snake_case__ : Dict = list(our_model.state_dict().keys() ) print(len(snake_case__ ) , len(snake_case__ ) ) for i in range(len(snake_case__ ) ): snake_case__ : int = weights[og_keys[i]] our_model.load_state_dict(snake_case__ ) snake_case__ : Tuple = torch.randn((2, 3, 2_24, 2_24) ) snake_case__ : List[Any] = from_model(snake_case__ ) snake_case__ : Tuple = our_model(snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ ), "The model logits don't match the original one." snake_case__ : List[Any] = name print(snake_case__ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) snake_case__ : Optional[int] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f'''Pushed {checkpoint_name}''' ) def _A ( snake_case__ : Optional[Any] , snake_case__ : Tuple = None , snake_case__ : Any = True ): snake_case__ : str = '''imagenet-1k-id2label.json''' snake_case__ : Optional[Any] = 10_00 snake_case__ : List[Any] = (1, num_labels) snake_case__ : int = '''huggingface/label-files''' snake_case__ : str = num_labels snake_case__ : Union[str, Any] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) snake_case__ : Any = {int(snake_case__ ): v for k, v in idalabel.items()} snake_case__ : str = idalabel snake_case__ : List[str] = {v: k for k, v in idalabel.items()} snake_case__ : str = partial(snake_case__ , num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ ) snake_case__ : Dict = { '''levit-128S''': 1_28, '''levit-128''': 1_28, '''levit-192''': 1_92, '''levit-256''': 2_56, '''levit-384''': 3_84, } snake_case__ : str = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case__ , names_to_config[model_name] , snake_case__ , snake_case__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) return config, expected_shape if __name__ == "__main__": _lowerCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,", ) parser.add_argument( "--pytorch_dump_folder_path", default="levit-dump-folder/", type=Path, required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) _lowerCAmelCase : Any = parser.parse_args() _lowerCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
720
'''simple docstring''' from sklearn.metrics import fa_score import datasets _lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" _lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" _lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): """simple docstring""" def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]: """simple docstring""" snake_case__ : Union[str, Any] = fa_score( lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase ) return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
694
0
'''simple docstring''' import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def _A ( snake_case__ : str , snake_case__ : str , **snake_case__ : Optional[int] ): snake_case__ : Optional[Any] = AutoConfig.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) snake_case__ : Optional[Any] = AutoModelForSeqaSeqLM.from_config(__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) AutoTokenizer.from_pretrained(__UpperCamelCase ).save_pretrained(__UpperCamelCase ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
721
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 42 class snake_case ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Optional[Any] = sample_size # time if time_embedding_type == "fourier": snake_case__ : Optional[int] = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase ) snake_case__ : List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": snake_case__ : Dict = Timesteps( block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase ) snake_case__ : Dict = block_out_channels[0] if use_timestep_embedding: snake_case__ : Any = block_out_channels[0] * 4 snake_case__ : Optional[Any] = TimestepEmbedding( in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , ) snake_case__ : Dict = nn.ModuleList([] ) snake_case__ : List[Any] = None snake_case__ : Union[str, Any] = nn.ModuleList([] ) snake_case__ : List[str] = None # down snake_case__ : Tuple = in_channels for i, down_block_type in enumerate(lowerCamelCase ): snake_case__ : Tuple = output_channel snake_case__ : List[str] = block_out_channels[i] if i == 0: input_channel += extra_in_channels snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1 snake_case__ : Dict = get_down_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(lowerCamelCase ) # mid snake_case__ : Optional[int] = get_mid_block( lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , ) # up snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) ) snake_case__ : Any = reversed_block_out_channels[0] if out_block_type is None: snake_case__ : List[Any] = out_channels else: snake_case__ : Dict = block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase ): snake_case__ : List[str] = output_channel snake_case__ : List[str] = ( reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels ) snake_case__ : List[str] = i == len(lowerCamelCase ) - 1 snake_case__ : str = get_up_block( lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(lowerCamelCase ) snake_case__ : Optional[Any] = output_channel # out snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) snake_case__ : Union[str, Any] = get_out_block( out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]: """simple docstring""" snake_case__ : str = timestep if not torch.is_tensor(lowerCamelCase ): snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0: snake_case__ : Optional[Any] = timesteps[None].to(sample.device ) snake_case__ : Any = self.time_proj(lowerCamelCase ) if self.config.use_timestep_embedding: snake_case__ : Tuple = self.time_mlp(lowerCamelCase ) else: snake_case__ : Union[str, Any] = timestep_embed[..., None] snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down snake_case__ : List[Any] = () for downsample_block in self.down_blocks: snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase ) down_block_res_samples += res_samples # 3. mid if self.mid_block: snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): snake_case__ : str = down_block_res_samples[-1:] snake_case__ : int = down_block_res_samples[:-1] snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase ) # 5. post-process if self.out_block: snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase ) if not return_dict: return (sample,) return UNetaDOutput(sample=lowerCamelCase )
694
0
'''simple docstring''' def _A ( snake_case__ : list[int] , snake_case__ : str ): snake_case__ : List[str] = int(snake_case__ ) # Initialize Result snake_case__ : Dict = [] # Traverse through all denomination for denomination in reversed(snake_case__ ): # Find denominations while int(snake_case__ ) >= int(snake_case__ ): total_value -= int(snake_case__ ) answer.append(snake_case__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": _lowerCAmelCase : int = [] _lowerCAmelCase : Union[str, Any] = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): _lowerCAmelCase : Union[str, Any] = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) _lowerCAmelCase : Any = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter _lowerCAmelCase : Any = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0] _lowerCAmelCase : Optional[Any] = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(F'''Following is minimal change for {value}: ''') _lowerCAmelCase : str = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
700
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!" def _A ( snake_case__ : str , snake_case__ : str ): snake_case__ : Tuple = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 10_24, '''hidden_size''': 7_68, '''max_length''': 5_12, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 10_24, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1E-5, '''token_type_vocab_size''': 2, } snake_case__ : List[str] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py snake_case__ : str = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later snake_case__ : Any = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab snake_case__ : Any = os.path.join(get_home_dir() , '''models''' ) snake_case__ : List[Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ ) snake_case__ : Optional[int] = nlp.model.BERTModel( snake_case__ , len(snake_case__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case__ , use_decoder=snake_case__ , ) original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ ) snake_case__ : Any = original_bort._collect_params_with_prefix() # Build our config 🤗 snake_case__ : Union[str, Any] = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(snake_case__ ), } snake_case__ : Dict = BertConfig.from_dict(snake_case__ ) snake_case__ : Dict = BertForMaskedLM(snake_case__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(snake_case__ : str ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(snake_case__ : List[Any] , snake_case__ : Any ): snake_case__ : Union[str, Any] = hf_param.shape snake_case__ : Any = to_torch(params[gluon_param] ) snake_case__ : Dict = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) snake_case__ : int = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) snake_case__ : str = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) snake_case__ : str = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): snake_case__ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention snake_case__ : BertSelfAttention = layer.attention.self snake_case__ : Optional[Any] = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) snake_case__ : Dict = check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) snake_case__ : List[str] = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) snake_case__ : int = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) snake_case__ : List[Any] = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output snake_case__ : BertSelfOutput = layer.attention.output snake_case__ : Optional[Any] = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) snake_case__ : List[str] = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) snake_case__ : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) snake_case__ : Any = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate snake_case__ : BertIntermediate = layer.intermediate snake_case__ : int = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) snake_case__ : Optional[int] = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output snake_case__ : BertOutput = layer.output snake_case__ : Any = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) snake_case__ : Tuple = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) snake_case__ : Tuple = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) snake_case__ : Union[str, Any] = check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models snake_case__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' ) snake_case__ : str = tokenizer.encode_plus(snake_case__ )['''input_ids'''] # Get gluon output snake_case__ : List[str] = mx.nd.array([input_ids] ) snake_case__ : Optional[int] = original_bort(inputs=snake_case__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(snake_case__ ) snake_case__ : Optional[Any] = BertModel.from_pretrained(snake_case__ ) hf_bort_model.eval() snake_case__ : Optional[Any] = tokenizer.encode_plus(snake_case__ , return_tensors='''pt''' ) snake_case__ : str = hf_bort_model(**snake_case__ )[0] snake_case__ : str = output_gluon[0].asnumpy() snake_case__ : str = output_hf[0].detach().numpy() snake_case__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item() snake_case__ : Optional[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , snake_case__ ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
694
0
'''simple docstring''' import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser( description=( "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned" " Distillation" ) ) parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"]) parser.add_argument("--model_name", default="roberta-large", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") _lowerCAmelCase : Tuple = parser.parse_args() if args.model_type == "roberta": _lowerCAmelCase : Optional[Any] = RobertaForMaskedLM.from_pretrained(args.model_name) _lowerCAmelCase : Optional[int] = "roberta" elif args.model_type == "gpt2": _lowerCAmelCase : int = GPTaLMHeadModel.from_pretrained(args.model_name) _lowerCAmelCase : Optional[Any] = "transformer" _lowerCAmelCase : List[str] = model.state_dict() _lowerCAmelCase : str = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: _lowerCAmelCase : Any = state_dict[F'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: _lowerCAmelCase : Tuple = F'''{prefix}.embeddings.{w}.weight''' _lowerCAmelCase : List[Any] = state_dict[param_name] for w in ["weight", "bias"]: _lowerCAmelCase : List[Any] = F'''{prefix}.embeddings.LayerNorm.{w}''' _lowerCAmelCase : Any = state_dict[param_name] # Transformer Blocks # _lowerCAmelCase : Any = 0 for teacher_idx in [0, 2, 4, 7, 9, 1_1]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: _lowerCAmelCase : Dict = state_dict[ F'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] _lowerCAmelCase : Dict = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: _lowerCAmelCase : Dict = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: _lowerCAmelCase : Tuple = state_dict[F'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: _lowerCAmelCase : Any = state_dict[F'''lm_head.dense.{w}'''] _lowerCAmelCase : Optional[Any] = state_dict[F'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: _lowerCAmelCase : Optional[Any] = state_dict[F'''{prefix}.ln_f.{w}'''] _lowerCAmelCase : List[Any] = state_dict["lm_head.weight"] print(F'''N layers selected for distillation: {std_idx}''') print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
701
'''simple docstring''' def _A ( snake_case__ : int = 4_00_00_00 ): snake_case__ : int = [] snake_case__ ,snake_case__ : Union[str, Any] = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(snake_case__ ) snake_case__ ,snake_case__ : Any = b, a + b return sum(snake_case__ ) if __name__ == "__main__": print(F'''{solution() = }''')
694
0
'''simple docstring''' import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def _A ( snake_case__ : List[Any] ): snake_case__ : Optional[Any] = botoa.client('''iam''' ) snake_case__ : Any = { '''Version''': '''2012-10-17''', '''Statement''': [ {'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=snake_case__ , AssumeRolePolicyDocument=json.dumps(snake_case__ , indent=2 ) ) snake_case__ : Tuple = { '''Version''': '''2012-10-17''', '''Statement''': [ { '''Effect''': '''Allow''', '''Action''': [ '''sagemaker:*''', '''ecr:GetDownloadUrlForLayer''', '''ecr:BatchGetImage''', '''ecr:BatchCheckLayerAvailability''', '''ecr:GetAuthorizationToken''', '''cloudwatch:PutMetricData''', '''cloudwatch:GetMetricData''', '''cloudwatch:GetMetricStatistics''', '''cloudwatch:ListMetrics''', '''logs:CreateLogGroup''', '''logs:CreateLogStream''', '''logs:DescribeLogStreams''', '''logs:PutLogEvents''', '''logs:GetLogEvents''', '''s3:CreateBucket''', '''s3:ListBucket''', '''s3:GetBucketLocation''', '''s3:GetObject''', '''s3:PutObject''', ], '''Resource''': '''*''', } ], } # attach policy to role iam_client.put_role_policy( RoleName=snake_case__ , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(snake_case__ , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def _A ( snake_case__ : int ): snake_case__ : str = botoa.client('''iam''' ) return iam_client.get_role(RoleName=snake_case__ )["Role"]["Arn"] def _A ( ): snake_case__ : Any = _ask_options( '''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , snake_case__ , ) snake_case__ : str = None if credentials_configuration == 0: snake_case__ : Optional[int] = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' ) snake_case__ : List[str] = aws_profile else: print( '''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,''' '''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' ) snake_case__ : Union[str, Any] = _ask_field('''AWS Access Key ID: ''' ) snake_case__ : Optional[int] = aws_access_key_id snake_case__ : Dict = _ask_field('''AWS Secret Access Key: ''' ) snake_case__ : Optional[int] = aws_secret_access_key snake_case__ : str = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' ) snake_case__ : str = aws_region snake_case__ : Union[str, Any] = _ask_options( '''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , snake_case__ , ) if role_management == 0: snake_case__ : str = _ask_field('''Enter your IAM role name: ''' ) else: snake_case__ : Any = '''accelerate_sagemaker_execution_role''' print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(snake_case__ ) snake_case__ : List[str] = _ask_field( '''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case__ , error_message='''Please enter yes or no.''' , ) snake_case__ : Optional[Any] = None if is_custom_docker_image: snake_case__ : Union[str, Any] = _ask_field('''Enter your Docker image: ''' , lambda snake_case__ : str(snake_case__ ).lower() ) snake_case__ : Optional[int] = _ask_field( '''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case__ , error_message='''Please enter yes or no.''' , ) snake_case__ : Union[str, Any] = None if is_sagemaker_inputs_enabled: snake_case__ : Dict = _ask_field( '''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda snake_case__ : str(snake_case__ ).lower() , ) snake_case__ : Union[str, Any] = _ask_field( '''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case__ , error_message='''Please enter yes or no.''' , ) snake_case__ : Optional[int] = None if is_sagemaker_metrics_enabled: snake_case__ : List[str] = _ask_field( '''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda snake_case__ : str(snake_case__ ).lower() , ) snake_case__ : List[Any] = _ask_options( '''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , ) snake_case__ : Union[str, Any] = {} snake_case__ : Any = _ask_field( '''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=snake_case__ , error_message='''Please enter yes or no.''' , ) if use_dynamo: snake_case__ : int = '''dynamo_''' snake_case__ : Dict = _ask_options( '''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) snake_case__ : Dict = _ask_field( '''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case__ , error_message='''Please enter yes or no.''' , ) if use_custom_options: snake_case__ : Dict = _ask_options( '''Which mode do you want to use?''' , snake_case__ , lambda snake_case__ : TORCH_DYNAMO_MODES[int(snake_case__ )] , default='''default''' , ) snake_case__ : List[Any] = _ask_field( '''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case__ , error_message='''Please enter yes or no.''' , ) snake_case__ : str = _ask_field( '''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case__ , error_message='''Please enter yes or no.''' , ) snake_case__ : Tuple = '''Which EC2 instance type you want to use for your training?''' if distributed_type != SageMakerDistributedType.NO: snake_case__ : Dict = _ask_options( snake_case__ , snake_case__ , lambda snake_case__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(snake_case__ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" snake_case__ : int = _ask_field(snake_case__ , lambda snake_case__ : str(snake_case__ ).lower() , default='''ml.p3.2xlarge''' ) snake_case__ : Optional[int] = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): snake_case__ : Dict = _ask_field( '''How many machines do you want use? [1]: ''' , snake_case__ , default=1 , ) snake_case__ : List[str] = _ask_options( '''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( '''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' ) return SageMakerConfig( image_uri=snake_case__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=snake_case__ , use_cpu=snake_case__ , dynamo_config=snake_case__ , eca_instance_type=snake_case__ , profile=snake_case__ , region=snake_case__ , iam_role_name=snake_case__ , mixed_precision=snake_case__ , num_machines=snake_case__ , sagemaker_inputs_file=snake_case__ , sagemaker_metrics_file=snake_case__ , )
702
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: _lowerCAmelCase : Any = None _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = "▁" _lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : int = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}, "tokenizer_file": { "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json" }, } _lowerCAmelCase : Optional[int] = { "google/pegasus-xsum": 5_1_2, } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = PegasusTokenizer _lowerCAmelCase = ['input_ids', 'attention_mask'] def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]: """simple docstring""" snake_case__ : Tuple = offset if additional_special_tokens is not None: if not isinstance(lowerCamelCase , lowerCamelCase ): raise TypeError( f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is''' f''' {type(lowerCamelCase )}''' ) snake_case__ : List[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 ) ] if len(set(lowerCamelCase ) ) != len(lowerCamelCase ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' ) snake_case__ : List[Any] = additional_special_tokens_extended else: snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Union[str, Any] = vocab_file snake_case__ : List[Any] = False if not self.vocab_file else True def lowercase__ ( self , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( '''There should be 3 special tokens: mask_token, pad_token, and eos_token +''' f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(lowerCamelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCamelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : int = os.path.join( lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ): copyfile(self.vocab_file , lowerCamelCase ) return (out_vocab_file,)
694
0
'''simple docstring''' import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class snake_case ( UpperCamelCase_ ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ) -> Dict: """simple docstring""" snake_case__ : Tuple = parent snake_case__ : Optional[Any] = batch_size snake_case__ : Optional[Any] = seq_length snake_case__ : str = is_training snake_case__ : Any = use_input_mask snake_case__ : Tuple = use_token_type_ids snake_case__ : Optional[int] = use_labels snake_case__ : Optional[int] = vocab_size snake_case__ : Any = hidden_size snake_case__ : List[str] = num_hidden_layers snake_case__ : Optional[Any] = num_attention_heads snake_case__ : Dict = intermediate_size snake_case__ : Dict = hidden_act snake_case__ : List[str] = hidden_dropout_prob snake_case__ : int = attention_probs_dropout_prob snake_case__ : Optional[int] = max_position_embeddings snake_case__ : int = type_vocab_size snake_case__ : int = type_sequence_label_size snake_case__ : Dict = initializer_range snake_case__ : List[Any] = num_labels snake_case__ : str = num_choices snake_case__ : Tuple = scope def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : Dict = None if self.use_input_mask: snake_case__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Union[str, Any] = None snake_case__ : Tuple = None snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ : Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self ) -> Any: """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : List[Any] = DistilBertModel(config=__A ) model.to(__A ) model.eval() snake_case__ : str = model(__A , __A ) snake_case__ : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ : str = DistilBertForMaskedLM(config=__A ) model.to(__A ) model.eval() snake_case__ : List[Any] = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any: """simple docstring""" snake_case__ : str = DistilBertForQuestionAnswering(config=__A ) model.to(__A ) model.eval() snake_case__ : Dict = model( __A , attention_mask=__A , start_positions=__A , end_positions=__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]: """simple docstring""" snake_case__ : Optional[Any] = self.num_labels snake_case__ : Dict = DistilBertForSequenceClassification(__A ) model.to(__A ) model.eval() snake_case__ : List[str] = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]: """simple docstring""" snake_case__ : Optional[Any] = self.num_labels snake_case__ : Tuple = DistilBertForTokenClassification(config=__A ) model.to(__A ) model.eval() snake_case__ : Tuple = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ : Union[str, Any] = self.num_choices snake_case__ : List[Any] = DistilBertForMultipleChoice(config=__A ) model.to(__A ) model.eval() snake_case__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ : List[Any] = model( __A , attention_mask=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : List[str] = self.prepare_config_and_inputs() (snake_case__) : Optional[Any] = config_and_inputs snake_case__ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _lowerCAmelCase = ( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = True def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : List[Any] = DistilBertModelTester(self ) snake_case__ : str = ConfigTester(self , config_class=__A , dim=37 ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__A ) def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__A ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__A ) def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__A ) def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__A ) def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__A ) @slow def lowercase__ ( self ) -> Optional[int]: """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : List[str] = DistilBertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @slow @require_torch_gpu def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return snake_case__ : Any = True snake_case__ : int = model_class(config=__A ) snake_case__ : Tuple = self._prepare_for_class(__A , __A ) snake_case__ : Any = torch.jit.trace( __A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__A , os.path.join(__A , '''traced_model.pt''' ) ) snake_case__ : int = torch.jit.load(os.path.join(__A , '''traced_model.pt''' ) , map_location=__A ) loaded(inputs_dict['''input_ids'''].to(__A ) , inputs_dict['''attention_mask'''].to(__A ) ) @require_torch class snake_case ( unittest.TestCase ): """simple docstring""" @slow def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) snake_case__ : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) snake_case__ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): snake_case__ : Optional[Any] = model(__A , attention_mask=__A )[0] snake_case__ : Any = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __A ) snake_case__ : Union[str, Any] = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) )
703
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple: """simple docstring""" snake_case__ : Optional[Any] = 1.0 if scale is None else scale snake_case__ : Dict = 0.0 if loc is None else loc super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] ) @property def lowercase__ ( self ) -> Dict: """simple docstring""" return self.base_dist.mean * self.scale + self.loc @property def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" return self.base_dist.variance * self.scale**2 @property def lowercase__ ( self ) -> List[str]: """simple docstring""" return self.variance.sqrt() class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None: """simple docstring""" super().__init__(**lowerCamelCase ) snake_case__ : Tuple = args_dim snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] ) snake_case__ : Optional[int] = domain_map def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]: """simple docstring""" snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj] return self.domain_map(*lowerCamelCase ) class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Tuple = function def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]: """simple docstring""" return self.function(lowerCamelCase , *lowerCamelCase ) class snake_case : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 def __init__( self , lowerCamelCase = 1 ) -> None: """simple docstring""" snake_case__ : Optional[Any] = dim snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim} def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" if self.dim == 1: return self.distribution_class(*lowerCamelCase ) else: return Independent(self.distribution_class(*lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution: """simple docstring""" snake_case__ : List[Any] = self._base_distribution(lowerCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim ) @property def lowercase__ ( self ) -> Tuple: """simple docstring""" return () if self.dim == 1 else (self.dim,) @property def lowercase__ ( self ) -> int: """simple docstring""" return len(self.event_shape ) @property def lowercase__ ( self ) -> float: """simple docstring""" return 0.0 def lowercase__ ( self , lowerCamelCase ) -> nn.Module: """simple docstring""" return ParameterProjection( in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowercase__ ( self , *lowerCamelCase ) -> Any: """simple docstring""" raise NotImplementedError() @staticmethod def lowercase__ ( lowerCamelCase ) -> torch.Tensor: """simple docstring""" return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0 class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1} _lowerCAmelCase = StudentT @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"loc": 1, "scale": 1} _lowerCAmelCase = Normal @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"total_count": 1, "logits": 1} _lowerCAmelCase = NegativeBinomial @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowercase__ ( self , lowerCamelCase ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : str = distr_args if self.dim == 1: return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) else: return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : Optional[Any] = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
0
'''simple docstring''' def _A ( snake_case__ : int ): if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError('''only integers accepted as input''' ) else: snake_case__ : List[str] = str(abs(_lowerCamelCase ) ) snake_case__ : Optional[int] = [list(_lowerCamelCase ) for char in range(len(_lowerCamelCase ) )] for index in range(len(_lowerCamelCase ) ): num_transpositions[index].pop(_lowerCamelCase ) return max( int(''''''.join(list(_lowerCamelCase ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__("doctest").testmod()
704
'''simple docstring''' from math import factorial def _A ( snake_case__ : int = 20 ): snake_case__ : int = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... snake_case__ : Union[str, Any] = n // 2 return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(2_0)) else: try: _lowerCAmelCase : Any = int(sys.argv[1]) print(solution(n)) except ValueError: print("Invalid entry - please enter a number.")
694
0
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class snake_case ( unittest.TestCase ): """simple docstring""" @property def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) snake_case__ : str = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Any = self.dummy_uncond_unet snake_case__ : List[str] = KarrasVeScheduler() snake_case__ : Union[str, Any] = KarrasVePipeline(unet=_a , scheduler=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) snake_case__ : Tuple = torch.manual_seed(0 ) snake_case__ : Union[str, Any] = pipe(num_inference_steps=2 , generator=_a , output_type='''numpy''' ).images snake_case__ : Union[str, Any] = torch.manual_seed(0 ) snake_case__ : Optional[int] = pipe(num_inference_steps=2 , generator=_a , output_type='''numpy''' , return_dict=_a )[0] snake_case__ : Dict = image[0, -3:, -3:, -1] snake_case__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class snake_case ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self ) -> List[Any]: """simple docstring""" snake_case__ : Tuple = """google/ncsnpp-celebahq-256""" snake_case__ : Dict = UNetaDModel.from_pretrained(_a ) snake_case__ : List[str] = KarrasVeScheduler() snake_case__ : Any = KarrasVePipeline(unet=_a , scheduler=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) snake_case__ : str = torch.manual_seed(0 ) snake_case__ : Optional[int] = pipe(num_inference_steps=20 , generator=_a , output_type='''numpy''' ).images snake_case__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) snake_case__ : Dict = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
705
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = (EulerDiscreteScheduler,) _lowerCAmelCase = 1_0 def lowercase__ ( self , **lowerCamelCase ) -> Tuple: """simple docstring""" snake_case__ : Any = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowerCamelCase ) return config def lowercase__ ( self ) -> List[Any]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=lowerCamelCase ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCamelCase ) def lowercase__ ( self ) -> str: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Any = self.get_scheduler_config() snake_case__ : int = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ : Dict = torch.manual_seed(0 ) snake_case__ : Any = self.dummy_model() snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ : List[Any] = sample.to(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : int = model(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Optional[int] = output.prev_sample snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def lowercase__ ( self ) -> Dict: """simple docstring""" snake_case__ : Tuple = self.scheduler_classes[0] snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) snake_case__ : int = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ : Optional[Any] = torch.manual_seed(0 ) snake_case__ : Optional[int] = self.dummy_model() snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ : Optional[int] = sample.to(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Union[str, Any] = output.prev_sample snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 0.0_002 ) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3 def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : List[Any] = self.scheduler_classes[0] snake_case__ : Optional[int] = self.get_scheduler_config() snake_case__ : List[str] = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase ) snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Optional[int] = self.dummy_model() snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ : Tuple = sample.to(lowerCamelCase ) for t in scheduler.timesteps: snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : str = model(lowerCamelCase , lowerCamelCase ) snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : int = output.prev_sample snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : Dict = self.scheduler_classes[0] snake_case__ : str = self.get_scheduler_config() snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase ) snake_case__ : int = torch.manual_seed(0 ) snake_case__ : Dict = self.dummy_model() snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ : Optional[Any] = sample.to(lowerCamelCase ) for t in scheduler.timesteps: snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase ) snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ) snake_case__ : Optional[int] = output.prev_sample snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) ) snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
694
0
'''simple docstring''' from collections import deque def _A ( snake_case__ : Optional[int] ): snake_case__ : Dict = len(snake_case__ ) snake_case__ : List[Any] = deque() snake_case__ : List[str] = [False for _ in range(snake_case__ )] snake_case__ : List[str] = [-1 for _ in range(snake_case__ )] snake_case__ : Optional[int] = index_of[:] def strong_connect(snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ): snake_case__ : Dict = index # the number when this node is seen snake_case__ : Tuple = index # lowest rank node reachable from here index += 1 stack.append(snake_case__ ) snake_case__ : Tuple = True for w in g[v]: if index_of[w] == -1: snake_case__ : int = strong_connect(snake_case__ , snake_case__ , snake_case__ ) snake_case__ : Dict = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: snake_case__ : Dict = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: snake_case__ : int = [] snake_case__ : List[str] = stack.pop() snake_case__ : int = False component.append(snake_case__ ) while w != v: snake_case__ : Optional[int] = stack.pop() snake_case__ : str = False component.append(snake_case__ ) components.append(snake_case__ ) return index snake_case__ : Any = [] for v in range(snake_case__ ): if index_of[v] == -1: strong_connect(snake_case__ , 0 , snake_case__ ) return components def _A ( snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ): snake_case__ : Dict = [[] for _ in range(snake_case__ )] for u, v in edges: g[u].append(snake_case__ ) return g if __name__ == "__main__": # Test _lowerCAmelCase : Optional[Any] = 7 _lowerCAmelCase : Union[str, Any] = [0, 0, 1, 2, 3, 3, 4, 4, 6] _lowerCAmelCase : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5] _lowerCAmelCase : Dict = [(u, v) for u, v in zip(source, target)] _lowerCAmelCase : Optional[int] = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
706
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = ['pixel_values'] def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None: """simple docstring""" snake_case__ : int = do_resize snake_case__ : Dict = do_rescale snake_case__ : Any = size_divisor snake_case__ : str = resample super().__init__(**lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase ) # Rounds the height and width down to the closest multiple of size_divisor snake_case__ : Any = height // size_divisor * size_divisor snake_case__ : Union[str, Any] = width // size_divisor * size_divisor snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) return image def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray: """simple docstring""" return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature: """simple docstring""" snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor snake_case__ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase ) if not valid_images(lowerCamelCase ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images] if do_resize: snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images] if do_rescale: snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images] snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images] snake_case__ : str = {'''pixel_values''': images} return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
694
0
'''simple docstring''' import math def _A ( snake_case__ : int ): assert isinstance(__snake_case , __snake_case ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False snake_case__ : List[Any] = range(3 , int(math.sqrt(__snake_case ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def _A ( snake_case__ : str , snake_case__ : Any=1 , **snake_case__ : Dict ): snake_case__ : Tuple = factor * value snake_case__ : Dict = value while not is_prime(__snake_case ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **__snake_case ) return value
707
'''simple docstring''' from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] ) @pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] ) @pytest.mark.parametrize('''revision''' , [None, '''v2'''] ) def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ): snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ ) assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
694
0
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class snake_case : """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=14 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=0.02 , ) -> Any: """simple docstring""" snake_case__ : Optional[int] = parent snake_case__ : List[Any] = batch_size snake_case__ : str = seq_length snake_case__ : List[str] = is_training snake_case__ : List[str] = use_input_mask snake_case__ : int = use_token_type_ids snake_case__ : str = use_labels snake_case__ : Dict = vocab_size snake_case__ : Optional[Any] = hidden_size snake_case__ : int = rotary_dim snake_case__ : str = num_hidden_layers snake_case__ : List[Any] = num_attention_heads snake_case__ : Union[str, Any] = intermediate_size snake_case__ : Optional[int] = hidden_act snake_case__ : List[Any] = hidden_dropout_prob snake_case__ : List[str] = attention_probs_dropout_prob snake_case__ : Any = max_position_embeddings snake_case__ : Tuple = initializer_range snake_case__ : Any = None snake_case__ : Dict = vocab_size - 1 snake_case__ : Tuple = vocab_size - 1 snake_case__ : List[Any] = vocab_size - 1 def lowercase__ ( self ) -> List[Any]: """simple docstring""" snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : List[Any] = None if self.use_input_mask: snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Dict = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Union[str, Any] = self.prepare_config_and_inputs() snake_case__ : Tuple = config_and_inputs snake_case__ : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any: """simple docstring""" snake_case__ : str = 20 snake_case__ : Optional[Any] = model_class_name(lowercase_ ) snake_case__ : int = model.init_cache(input_ids.shape[0] , lowercase_ ) snake_case__ : Union[str, Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) snake_case__ : Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case__ : Tuple = model( input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , ) snake_case__ : Optional[int] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) snake_case__ : str = model( input_ids[:, -1:] , attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowercase_ , ) snake_case__ : int = model(lowercase_ ) snake_case__ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case__ : int = 20 snake_case__ : List[Any] = model_class_name(lowercase_ ) snake_case__ : Any = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case__ : int = model.init_cache(input_ids.shape[0] , lowercase_ ) snake_case__ : int = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case__ : int = model( input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , ) snake_case__ : Optional[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) snake_case__ : Optional[int] = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowercase_ , position_ids=lowercase_ , ) snake_case__ : Tuple = model(lowercase_ , attention_mask=lowercase_ ) snake_case__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) @require_flax class snake_case ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () _lowerCAmelCase = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Union[str, Any] = FlaxGPTJModelTester(self ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) def lowercase__ ( self ) -> List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) @tooslow def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Tuple = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' ) snake_case__ : List[str] = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=lowercase_ , truncation=lowercase_ ) snake_case__ : List[Any] = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) snake_case__ : Optional[Any] = False snake_case__ : Optional[int] = model.config.eos_token_id snake_case__ : Union[str, Any] = jax.jit(model.generate ) snake_case__ : Any = jit_generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case__ : List[Any] = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) snake_case__ : List[str] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(lowercase_ , lowercase_ ) @is_pt_flax_cross_test def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case__ : Dict = self._prepare_for_class(lowercase_ , lowercase_ ) snake_case__ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case__ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case__ : Union[str, Any] = getattr(lowercase_ , lowercase_ ) snake_case__ : Optional[Any] = pt_inputs["""input_ids"""].shape snake_case__ : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowercase_ ): snake_case__ : List[str] = 0 snake_case__ : Optional[int] = 1 snake_case__ : str = 0 snake_case__ : List[str] = 1 snake_case__ : List[Any] = pt_model_class(lowercase_ ).eval() snake_case__ : Optional[int] = model_class(lowercase_ , dtype=jnp.floataa ) snake_case__ : List[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ ) snake_case__ : Dict = fx_state with torch.no_grad(): snake_case__ : List[Any] = pt_model(**lowercase_ ).to_tuple() snake_case__ : Union[str, Any] = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(lowercase_ , lowercase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) snake_case__ : Dict = model_class.from_pretrained(lowercase_ , from_pt=lowercase_ ) snake_case__ : Any = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual( len(lowercase_ ) , len(lowercase_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(lowercase_ , lowercase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowercase__ ( self ) -> str: """simple docstring""" snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case__ : Tuple = self._prepare_for_class(lowercase_ , lowercase_ ) snake_case__ : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case__ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case__ : Optional[Any] = getattr(lowercase_ , lowercase_ ) snake_case__ : Dict = pt_model_class(lowercase_ ).eval() snake_case__ : Dict = model_class(lowercase_ , dtype=jnp.floataa ) snake_case__ : Any = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params ) snake_case__ : Optional[Any] = pt_inputs["""input_ids"""].shape snake_case__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowercase_ ): snake_case__ : Tuple = 0 snake_case__ : int = 1 snake_case__ : List[Any] = 0 snake_case__ : Any = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case__ : Tuple = pt_model(**lowercase_ ).to_tuple() snake_case__ : str = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(lowercase_ , lowercase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) snake_case__ : Dict = pt_model_class.from_pretrained(lowercase_ , from_flax=lowercase_ ) with torch.no_grad(): snake_case__ : str = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual( len(lowercase_ ) , len(lowercase_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(lowercase_ , lowercase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowercase__ ( self ) -> Dict: """simple docstring""" for model_class_name in self.all_model_classes: snake_case__ : Any = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) snake_case__ : Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowercase_ )
708
'''simple docstring''' from __future__ import annotations from collections import namedtuple def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
694
0
'''simple docstring''' def _A ( snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ): if index == r: for j in range(_lowercase ): print(data[j] , end=''' ''' ) print(''' ''' ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location snake_case__ : Any = arr[i] combination_util(_lowercase , _lowercase , _lowercase , index + 1 , _lowercase , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def _A ( snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[Any] ): snake_case__ : Tuple = [0] * r # Print all combination using temporary array 'data[]' combination_util(_lowercase , _lowercase , _lowercase , 0 , _lowercase , 0 ) if __name__ == "__main__": # Driver code to check the function above _lowerCAmelCase : Union[str, Any] = [1_0, 2_0, 3_0, 4_0, 5_0] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
709
'''simple docstring''' import os import pytest from transformers.dynamic_module_utils import get_imports _lowerCAmelCase : Union[str, Any] = "\nimport os\n" _lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n" _lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n" _lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n" _lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n" _lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n" _lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n" _lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n" _lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n" _lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n" _lowerCAmelCase : Tuple = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , snake_case__ ) def _A ( snake_case__ : List[str] , snake_case__ : Dict ): snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' ) with open(snake_case__ , '''w''' ) as _tmp_file: _tmp_file.write(snake_case__ ) snake_case__ : int = get_imports(snake_case__ ) assert parsed_imports == ["os"]
694
0
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def _A ( snake_case__ : Dict , snake_case__ : str , snake_case__ : List[Any]=None ): assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match''' snake_case__ : Optional[Any] = nn.Parameter(UpperCAmelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match''' snake_case__ : str = nn.Parameter(UpperCAmelCase__ ) def _A ( snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : int ): snake_case__ : Any = np.asarray(weights[0] ) snake_case__ : Optional[int] = np.asarray(weights[1] ) snake_case__ : List[Any] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCAmelCase__ ).view(-1 , UpperCAmelCase__ ).contiguous().transpose(0 , 1 ) , ) def _A ( snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Tuple ): snake_case__ : Any = np.asarray(weights[0] ) snake_case__ : Tuple = np.asarray(weights[1] ) snake_case__ : Union[str, Any] = np.asarray(weights[2] ) snake_case__ : Optional[int] = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCAmelCase__ ).view(-1 , UpperCAmelCase__ ).contiguous().transpose(0 , 1 ) , ) def _A ( snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Optional[Any] ): snake_case__ : Dict = weights[0][0][0] snake_case__ : Tuple = np.asarray(layer_norm_a[0] ) snake_case__ : str = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) , ) # lsh weights + output snake_case__ : List[Any] = weights[0][1] if len(UpperCAmelCase__ ) < 4: set_layer_weights_in_torch_lsh(UpperCAmelCase__ , torch_block.attention , UpperCAmelCase__ ) else: set_layer_weights_in_torch_local(UpperCAmelCase__ , torch_block.attention , UpperCAmelCase__ ) # intermediate weighs snake_case__ : int = weights[2][0][1][2] # Chunked Feed Forward if len(UpperCAmelCase__ ) == 4: snake_case__ : Dict = intermediate_weights[2] # layernorm 2 snake_case__ : str = np.asarray(intermediate_weights[0][0] ) snake_case__ : Union[str, Any] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) , ) # intermediate dense snake_case__ : Union[str, Any] = np.asarray(intermediate_weights[1][0] ) snake_case__ : Optional[Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(UpperCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase__ ) , ) # intermediate out snake_case__ : Tuple = np.asarray(intermediate_weights[4][0] ) snake_case__ : List[str] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(UpperCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase__ ) , ) def _A ( snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int ): snake_case__ : List[Any] = torch_model.reformer # word embeds snake_case__ : Union[str, Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCAmelCase__ ) , ) if isinstance(weights[3] , UpperCAmelCase__ ): snake_case__ : Optional[Any] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): snake_case__ : Dict = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f'''{position_embeddings[emb_idx]} emb does not match''' snake_case__ : List[str] = nn.Parameter(torch.tensor(UpperCAmelCase__ ) ) snake_case__ : Optional[int] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( UpperCAmelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): snake_case__ : Optional[int] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # output layer norm snake_case__ : Optional[int] = np.asarray(weights[7][0] ) snake_case__ : str = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) , ) # output embeddings snake_case__ : Optional[Any] = np.asarray(weights[9][0] ) snake_case__ : List[str] = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(UpperCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase__ ) , ) def _A ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : Dict ): snake_case__ : str = ReformerConfig.from_json_file(UpperCAmelCase__ ) print(f'''Building PyTorch model from configuration: {config}''' ) snake_case__ : Dict = ReformerModelWithLMHead(UpperCAmelCase__ ) with open(UpperCAmelCase__ , '''rb''' ) as f: snake_case__ : int = pickle.load(UpperCAmelCase__ )['''weights'''] set_model_weights_in_torch(UpperCAmelCase__ , UpperCAmelCase__ , config.hidden_size ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , UpperCAmelCase__ ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase : Optional[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
710
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Any = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = 'markuplm' def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str: """simple docstring""" super().__init__( pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , ) snake_case__ : Optional[int] = vocab_size snake_case__ : Tuple = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : List[str] = num_attention_heads snake_case__ : List[Any] = hidden_act snake_case__ : Dict = intermediate_size snake_case__ : List[str] = hidden_dropout_prob snake_case__ : Optional[int] = attention_probs_dropout_prob snake_case__ : str = max_position_embeddings snake_case__ : str = type_vocab_size snake_case__ : List[str] = initializer_range snake_case__ : List[str] = layer_norm_eps snake_case__ : Optional[Any] = position_embedding_type snake_case__ : Dict = use_cache snake_case__ : int = classifier_dropout # additional properties snake_case__ : Union[str, Any] = max_depth snake_case__ : Dict = max_xpath_tag_unit_embeddings snake_case__ : Any = max_xpath_subs_unit_embeddings snake_case__ : int = tag_pad_id snake_case__ : Tuple = subs_pad_id snake_case__ : Dict = xpath_unit_hidden_size
694
0