'''simple docstring''' import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCamelCase__ : '''simple docstring''' def __init__( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=99 , UpperCamelCase_ : Optional[int]=13 , UpperCamelCase_ : str=7 , UpperCamelCase_ : str=9 , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : str=False , UpperCamelCase_ : Dict=32 , UpperCamelCase_ : Dict=5 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Optional[int]=37 , UpperCamelCase_ : Tuple=8 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : List[str]=0.0_02 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[str]=None , ) -> Any: '''simple docstring''' _lowercase : int = parent _lowercase : Optional[int] = batch_size _lowercase : Dict = encoder_seq_length _lowercase : Union[str, Any] = decoder_seq_length # For common tests _lowercase : int = self.decoder_seq_length _lowercase : Tuple = is_training _lowercase : Optional[int] = use_attention_mask _lowercase : List[str] = use_labels _lowercase : int = vocab_size _lowercase : Union[str, Any] = hidden_size _lowercase : int = num_hidden_layers _lowercase : int = num_attention_heads _lowercase : Union[str, Any] = d_ff _lowercase : Tuple = relative_attention_num_buckets _lowercase : List[Any] = dropout_rate _lowercase : Dict = initializer_factor _lowercase : Optional[Any] = eos_token_id _lowercase : str = pad_token_id _lowercase : int = decoder_start_token_id _lowercase : Dict = None _lowercase : List[str] = decoder_layers def __UpperCAmelCase ( self : Dict ) -> Any: '''simple docstring''' return TaConfig.from_pretrained('google/umt5-base' ) def __UpperCAmelCase ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Optional[int]=None , ) -> List[str]: '''simple docstring''' if attention_mask is None: _lowercase : Any = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _lowercase : List[str] = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _lowercase : Dict = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase_ ) if decoder_head_mask is None: _lowercase : int = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase_ ) if cross_attn_head_mask is None: _lowercase : List[str] = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase_ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict: '''simple docstring''' _lowercase : 
List[str] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _lowercase : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _lowercase : Tuple = input_ids.clamp(self.pad_token_id + 1 ) _lowercase : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 ) _lowercase : Optional[Any] = self.get_config() _lowercase : List[Any] = config.num_attention_heads _lowercase : int = self.prepare_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return config, input_dict def __UpperCAmelCase ( self : List[str] ) -> Any: '''simple docstring''' _lowercase , _lowercase : Optional[int] = self.prepare_config_and_inputs() return config, inputs_dict def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: '''simple docstring''' return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __UpperCAmelCase ( self : List[str] ) -> str: '''simple docstring''' return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , ) -> Tuple: '''simple docstring''' _lowercase : Dict = UMTaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : int = model( input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , ) _lowercase : Any = model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ ) _lowercase : Tuple = result.last_hidden_state _lowercase : Optional[int] = result.past_key_values _lowercase : str = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(UpperCamelCase_ ) , 
config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __UpperCAmelCase ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Any , ) -> Any: '''simple docstring''' _lowercase : str = UMTaModel(config=UpperCamelCase_ ).get_decoder().to(UpperCamelCase_ ).eval() # first forward pass _lowercase : Optional[Any] = model(UpperCamelCase_ , use_cache=UpperCamelCase_ ) _lowercase : Dict = model(UpperCamelCase_ ) _lowercase : Dict = model(UpperCamelCase_ , use_cache=UpperCamelCase_ ) self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) ) self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) + 1 ) _lowercase , _lowercase : Any = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowercase : Any = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _lowercase : int = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowercase : Optional[int] = model(UpperCamelCase_ )['last_hidden_state'] _lowercase : str = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ )['last_hidden_state'] # select random slice _lowercase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowercase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach() _lowercase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , ) -> Tuple: '''simple docstring''' _lowercase : Optional[int] = UMTaModel(config=UpperCamelCase_ ).to(UpperCamelCase_ ).half().eval() _lowercase : Tuple = model(**UpperCamelCase_ )['last_hidden_state'] self.parent.assertFalse(torch.isnan(UpperCamelCase_ ).any().item() ) @require_torch class lowerCamelCase__ ( A , A , A , unittest.TestCase ): '''simple docstring''' A_ = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) A_ = (UMTaForConditionalGeneration,) if is_torch_available() else () A_ = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False A_ = True A_ = True # The small UMT5 model needs higher percentages for CPU/MP tests A_ = [0.8, 0.9] def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' _lowercase : List[str] = UMTaModelTester(self ) @unittest.skip('Test has a segmentation fault on torch 1.8.0' ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : Any = self.model_tester.prepare_config_and_inputs() _lowercase : Optional[int] = UMTaModel(config_and_inputs[0] ).to(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( UpperCamelCase_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=UpperCamelCase_ , opset_version=9 , 
input_names=['input_ids', 'decoder_input_ids'] , ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def __UpperCAmelCase ( self : str ) -> str: '''simple docstring''' _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple ) -> List[str]: '''simple docstring''' _lowercase : List[str] = ['encoder_attentions', 'decoder_attentions', 'cross_attentions'] _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() _lowercase : Dict = config_and_inputs[0] _lowercase : Union[str, Any] = UMTaForConditionalGeneration(UpperCamelCase_ ).eval() model.to(UpperCamelCase_ ) _lowercase : List[Any] = { 'head_mask': torch.zeros(config.num_layers , config.num_heads , device=UpperCamelCase_ ), 'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase_ ), 'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase_ ), } for attn_name, (name, mask) in zip(UpperCamelCase_ , head_masking.items() ): _lowercase : Tuple = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _lowercase : Dict = torch.ones( config.num_decoder_layers , config.num_heads , device=UpperCamelCase_ ) _lowercase : Optional[Any] = model.generate( config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , **UpperCamelCase_ , ) # We check the state of decoder_attentions and cross_attentions just from the last step _lowercase : Dict = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' ) def __UpperCAmelCase ( self : str ) -> Optional[Any]: '''simple docstring''' pass @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( 'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' ) def __UpperCAmelCase ( self : List[str] ) -> Any: '''simple docstring''' _lowercase : int = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=UpperCamelCase_ ).to(UpperCamelCase_ ) _lowercase : int = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=UpperCamelCase_ , legacy=UpperCamelCase_ ) _lowercase : List[str] = [ 'Bonjour monsieur <extra_id_0> bien <extra_id_1>.', 'No se como puedo <extra_id_0>.', 'This is the reason why we <extra_id_0> them.', 'The <extra_id_0> walks in <extra_id_1>, seats', 'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.', ] _lowercase : Dict = tokenizer(UpperCamelCase_ , return_tensors='pt' , padding=UpperCamelCase_ ).input_ids # fmt: off _lowercase : Optional[int] = torch.tensor( [ [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : Optional[int] = model.generate(input_ids.to(UpperCamelCase_ ) ) _lowercase : Tuple = [ '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ ๐Ÿ’ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>', '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํ”ผํ•ด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', ] _lowercase : Optional[int] = tokenizer.batch_decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
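# A minimal usage sketch for the model exercised by the test file above. It
# assumes the de-obfuscated class name `UMT5ForConditionalGeneration` (the file
# calls it `UMTaForConditionalGeneration`) and network access to the
# `google/umt5-small` checkpoint used in the integration test:
from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
inputs = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)  # greedy decoding by default
print(tokenizer.batch_decode(outputs))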
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : Tuple = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: _lowercase : Tuple = 4 _lowercase : Union[str, Any] = 48 _lowercase : Any = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : Dict = [6, 6, 6, 6] _lowercase : Optional[int] = 60 _lowercase : List[str] = [6, 6, 6, 6] _lowercase : Dict = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : str = 4 _lowercase : str = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: _lowercase : str = 1 _lowercase : Tuple = 1 _lowercase : Dict = 126 _lowercase : Optional[int] = 7 _lowercase : List[Any] = 2_5_5.0 _lowercase : Tuple = '' return config def __UpperCamelCase ( _lowercase, _lowercase ) -> str: if "patch_embed.proj" in name and "layers" not in name: _lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: _lowercase : Tuple = name.replace('layers', 'encoder.stages' ) if "residual_group.blocks" in name: _lowercase : str = name.replace('residual_group.blocks', 'layers' ) if "attn.proj" in name: _lowercase : str = name.replace('attn.proj', 'attention.output.dense' ) if "attn" in name: _lowercase : List[Any] = name.replace('attn', 'attention.self' ) if "norm1" in name: _lowercase : List[str] = name.replace('norm1', 'layernorm_before' ) if "norm2" in name: _lowercase : Tuple = name.replace('norm2', 'layernorm_after' ) if "mlp.fc1" in name: _lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' ) if "mlp.fc2" in name: _lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' ) if "q_bias" in name: _lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' ) if "k_bias" in name: _lowercase : str = name.replace('k_bias', 'key.bias' ) if "v_bias" in name: _lowercase : int = name.replace('v_bias', 'value.bias' ) if "cpb_mlp" in name: _lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' ) if name == "norm.weight": _lowercase : Union[str, Any] = 'layernorm.weight' if name == "norm.bias": _lowercase : List[Any] = 'layernorm.bias' if "conv_first" in name: _lowercase : Tuple = name.replace('conv_first', 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: _lowercase : List[str] = name.replace('conv_last', 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: _lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' ) if "upsample.0" in name: _lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' ) if "upsample.2" in name: _lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' ) 
_lowercase : Optional[int] = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": _lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' ) _lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' ) else: pass else: _lowercase : Tuple = 'swin2sr.' + name return name def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]: for key in orig_state_dict.copy().keys(): _lowercase : int = orig_state_dict.pop(_lowercase ) if "qkv" in key: _lowercase : Tuple = key.split('.' ) _lowercase : Optional[Any] = int(key_split[1] ) _lowercase : Any = int(key_split[4] ) _lowercase : Optional[Any] = config.embed_dim if "weight" in key: _lowercase : Optional[int] = val[:dim, :] _lowercase : int = val[dim : dim * 2, :] _lowercase : int = val[-dim:, :] else: _lowercase : Optional[Any] = val[:dim] _lowercase : Tuple = val[dim : dim * 2] _lowercase : List[str] = val[-dim:] pass else: _lowercase : List[Any] = val return orig_state_dict def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]: _lowercase : Optional[Any] = get_config(_lowercase ) _lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase ) model.eval() _lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' ) _lowercase : Any = convert_state_dict(_lowercase, _lowercase ) _lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase ) if len(_lowercase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_lowercase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'''Unexpected key {key} in state_dict''' ) # verify values _lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' _lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' ) _lowercase : Tuple = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values _lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256 _lowercase : List[str] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) _lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 ) if config.num_channels == 1: _lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) _lowercase : Optional[int] = model(_lowercase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 512, 512] ) _lowercase : Tuple = torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] ) _lowercase : int = torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here _lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] ) _lowercase : Dict = torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : List[str] = torch.Size([1, 3, 512, 512] ) _lowercase : int = torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], 
[-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 1024, 1024] ) _lowercase : Union[str, Any] = torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 ) print('Looks ok!' ) _lowercase : List[str] = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } _lowercase : int = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowercase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_lowercase ) if push_to_hub: model.push_to_hub(f'''caidas/{model_name}''' ) processor.push_to_hub(f'''caidas/{model_name}''' ) if __name__ == "__main__": _A : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') _A : int =parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
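# Example invocation of the conversion script above. The file name is an
# assumption (transformers keeps such scripts next to the model code); the
# flags come from the argparse definition at the end of the script:
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#       --push_to_hub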
'''simple docstring''' from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class lowerCamelCase__ ( A ): '''simple docstring''' A_ = 42 class lowerCamelCase__ ( A , A ): '''simple docstring''' A_ = True @register_to_config def __init__( self : Dict , UpperCamelCase_ : int = 3 , UpperCamelCase_ : int = 3 , UpperCamelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCamelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCamelCase_ : Tuple[int] = (64,) , UpperCamelCase_ : int = 1 , UpperCamelCase_ : str = "silu" , UpperCamelCase_ : int = 4 , UpperCamelCase_ : int = 32 , UpperCamelCase_ : int = 32 , UpperCamelCase_ : float = 0.1_82_15 , ) -> str: '''simple docstring''' super().__init__() # pass init params to Encoder _lowercase : str = Encoder( in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , down_block_types=UpperCamelCase_ , block_out_channels=UpperCamelCase_ , layers_per_block=UpperCamelCase_ , act_fn=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , double_z=UpperCamelCase_ , ) # pass init params to Decoder _lowercase : Optional[int] = Decoder( in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , up_block_types=UpperCamelCase_ , block_out_channels=UpperCamelCase_ , layers_per_block=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , act_fn=UpperCamelCase_ , ) _lowercase : Optional[int] = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) _lowercase : Any = nn.Convad(UpperCamelCase_ , UpperCamelCase_ , 1 ) _lowercase : int = False _lowercase : Union[str, Any] = False # only relevant if vae tiling is enabled _lowercase : Dict = self.config.sample_size _lowercase : Any = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) _lowercase : str = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) _lowercase : List[Any] = 0.25 def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str=False ) -> Union[str, Any]: '''simple docstring''' if isinstance(UpperCamelCase_ , (Encoder, Decoder) ): _lowercase : str = value def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : bool = True ) -> Optional[int]: '''simple docstring''' _lowercase : Optional[Any] = use_tiling def __UpperCAmelCase ( self : Tuple ) -> str: '''simple docstring''' self.enable_tiling(UpperCamelCase_ ) def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: '''simple docstring''' _lowercase : Optional[int] = True def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Optional[int] = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __UpperCAmelCase ( self : Optional[int] ) -> Dict[str, AttentionProcessor]: '''simple docstring''' _lowercase : int = {} def fn_recursive_add_processors(UpperCamelCase_ : str , UpperCamelCase_ : torch.nn.Module , UpperCamelCase_ : Dict[str, AttentionProcessor] ): if hasattr(UpperCamelCase_ , 'set_processor' ): _lowercase : List[str] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase_ , UpperCamelCase_ ) return processors for name, 
module in self.named_children(): fn_recursive_add_processors(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return processors def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> int: '''simple docstring''' _lowercase : Tuple = len(self.attn_processors.keys() ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != count: raise ValueError( F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase_ )} does not match the''' F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' ) def fn_recursive_attn_processor(UpperCamelCase_ : str , UpperCamelCase_ : torch.nn.Module , UpperCamelCase_ : List[Any] ): if hasattr(UpperCamelCase_ , 'set_processor' ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): module.set_processor(UpperCamelCase_ ) else: module.set_processor(processor.pop(F'''{name}.processor''' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase_ , UpperCamelCase_ ) for name, module in self.named_children(): fn_recursive_attn_processor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = True ) -> AutoencoderKLOutput: '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(UpperCamelCase_ , return_dict=UpperCamelCase_ ) if self.use_slicing and x.shape[0] > 1: _lowercase : Optional[Any] = [self.encoder(UpperCamelCase_ ) for x_slice in x.split(1 )] _lowercase : Optional[Any] = torch.cat(UpperCamelCase_ ) else: _lowercase : Any = self.encoder(UpperCamelCase_ ) _lowercase : Tuple = self.quant_conv(UpperCamelCase_ ) _lowercase : Any = DiagonalGaussianDistribution(UpperCamelCase_ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(UpperCamelCase_ , return_dict=UpperCamelCase_ ) _lowercase : Tuple = self.post_quant_conv(UpperCamelCase_ ) _lowercase : int = self.decoder(UpperCamelCase_ ) if not return_dict: return (dec,) return DecoderOutput(sample=UpperCamelCase_ ) @apply_forward_hook def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' if self.use_slicing and z.shape[0] > 1: _lowercase : int = [self._decode(UpperCamelCase_ ).sample for z_slice in z.split(1 )] _lowercase : Dict = torch.cat(UpperCamelCase_ ) else: _lowercase : List[str] = self._decode(UpperCamelCase_ ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] ) -> List[Any]: '''simple docstring''' _lowercase : Optional[int] = min(a.shape[2] , b.shape[2] , UpperCamelCase_ ) for y in range(UpperCamelCase_ ): _lowercase : 
Optional[int] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any ) -> str: '''simple docstring''' _lowercase : Tuple = min(a.shape[3] , b.shape[3] , UpperCamelCase_ ) for x in range(UpperCamelCase_ ): _lowercase : Optional[int] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def __UpperCAmelCase ( self : str , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = True ) -> AutoencoderKLOutput: '''simple docstring''' _lowercase : List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) _lowercase : Dict = int(self.tile_latent_min_size * self.tile_overlap_factor ) _lowercase : List[Any] = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. _lowercase : int = [] for i in range(0 , x.shape[2] , UpperCamelCase_ ): _lowercase : List[str] = [] for j in range(0 , x.shape[3] , UpperCamelCase_ ): _lowercase : Dict = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] _lowercase : Dict = self.encoder(UpperCamelCase_ ) _lowercase : Optional[int] = self.quant_conv(UpperCamelCase_ ) row.append(UpperCamelCase_ ) rows.append(UpperCamelCase_ ) _lowercase : Any = [] for i, row in enumerate(UpperCamelCase_ ): _lowercase : List[Any] = [] for j, tile in enumerate(UpperCamelCase_ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: _lowercase : List[str] = self.blend_v(rows[i - 1][j] , UpperCamelCase_ , UpperCamelCase_ ) if j > 0: _lowercase : Any = self.blend_h(row[j - 1] , UpperCamelCase_ , UpperCamelCase_ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(UpperCamelCase_ , dim=3 ) ) _lowercase : Tuple = torch.cat(UpperCamelCase_ , dim=2 ) _lowercase : int = DiagonalGaussianDistribution(UpperCamelCase_ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' _lowercase : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) _lowercase : List[Any] = int(self.tile_sample_min_size * self.tile_overlap_factor ) _lowercase : Dict = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
_lowercase : str = [] for i in range(0 , z.shape[2] , UpperCamelCase_ ): _lowercase : Dict = [] for j in range(0 , z.shape[3] , UpperCamelCase_ ): _lowercase : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] _lowercase : List[str] = self.post_quant_conv(UpperCamelCase_ ) _lowercase : Union[str, Any] = self.decoder(UpperCamelCase_ ) row.append(UpperCamelCase_ ) rows.append(UpperCamelCase_ ) _lowercase : int = [] for i, row in enumerate(UpperCamelCase_ ): _lowercase : Optional[Any] = [] for j, tile in enumerate(UpperCamelCase_ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: _lowercase : Tuple = self.blend_v(rows[i - 1][j] , UpperCamelCase_ , UpperCamelCase_ ) if j > 0: _lowercase : Optional[int] = self.blend_h(row[j - 1] , UpperCamelCase_ , UpperCamelCase_ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(UpperCamelCase_ , dim=3 ) ) _lowercase : Optional[int] = torch.cat(UpperCamelCase_ , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=UpperCamelCase_ ) def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' _lowercase : List[Any] = sample _lowercase : List[Any] = self.encode(UpperCamelCase_ ).latent_dist if sample_posterior: _lowercase : Optional[Any] = posterior.sample(generator=UpperCamelCase_ ) else: _lowercase : str = posterior.mode() _lowercase : Union[str, Any] = self.decode(UpperCamelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=UpperCamelCase_ )
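# A short sketch of how the tiled encode/decode path above is typically used,
# assuming this class corresponds to diffusers' `AutoencoderKL` (the obfuscated
# names make that an inference, not a certainty):
import torch

from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.enable_tiling()  # large inputs are routed through tiled_encode / tiled_decode
with torch.no_grad():
    latents = vae.encode(torch.randn(1, 3, 1024, 1024)).latent_dist.sample()
    image = vae.decode(latents).sample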
"""Text justification: greedily pack words into lines of ``max_width``
characters and pad each line with evenly distributed spaces."""


def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
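# Quick usage check for the justifier above (expected output verified by hand
# against the greedy packing logic; each line is exactly 16 characters wide):
#
#   >>> text_justification("This is an example of text justification.", 16)
#   ['This    is    an', 'example  of text', 'justification.  ']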
"""Quine-McCluskey minimisation of a boolean function given as minterms."""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary terms if they differ in at most one position,
    replacing the differing bit with '_'; return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge terms until no merge succeeds; the terms that never
    merged are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # a successful merge marks both terms as covered and keeps
                # the merged term for the next round
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # an implicant is essential if it is the only cover of some minterm column
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover whatever minterms remain
    while True:
        max_n = 0
        rem = -1
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
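# A small worked example for the pipeline above: three variables, minterms
# {1, 5, 7}. The values in the comments were checked by hand against the merge
# rules; `check` deduplicates via a set, so ordering may vary:
binary = decimal_to_binary(3, [1, 5, 7])                  # ['001', '101', '111']
prime_implicants = check(binary)                          # ['_01', '1_1'] (order may vary)
chart = prime_implicant_chart(prime_implicants, binary)   # [[1, 1, 0], [0, 1, 1]]
essential = selection(chart, prime_implicants)            # both implicants are essential
print(essential)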
"""Build a markdown index of every .py/.ipynb file in the repository."""
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk skips scripts/ and hidden directories
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
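# Running `print_directory_md(".")` from a repository root prints a nested
# markdown index; illustrative output (the paths are hypothetical):
#
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)
#     * [Merge Sort](sorts/merge_sort.py)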
"""Sieve of Eratosthenes."""


def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return every prime up to and including num."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
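# Example values for the sieve above:
#
#   >>> prime_sieve_eratosthenes(10)
#   [2, 3, 5, 7]
#   >>> prime_sieve_eratosthenes(20)
#   [2, 3, 5, 7, 11, 13, 17, 19]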
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
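# The `_LazyModule` indirection above keeps `import transformers` cheap: a
# submodule such as `modeling_reformer` is only imported the first time one of
# its names is resolved. A sketch, assuming `transformers` and `torch` are
# installed:
from transformers import ReformerConfig, ReformerModel  # triggers the lazy load

config = ReformerConfig()      # configuration code only
model = ReformerModel(config)  # modeling code was pulled in when the name resolved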
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple: '''simple docstring''' _lowercase : int = parent _lowercase : str = batch_size _lowercase : List[str] = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_attention_mask _lowercase : List[Any] = use_token_type_ids _lowercase : Union[str, Any] = use_labels _lowercase : Dict = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : int = num_attention_heads _lowercase : Optional[int] = intermediate_size _lowercase : Any = hidden_act _lowercase : List[str] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Optional[int] = max_position_embeddings _lowercase : int = type_vocab_size _lowercase : Any = type_sequence_label_size _lowercase : Any = initializer_range _lowercase : str = num_choices def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : int = None if self.use_attention_mask: _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Any = None if self.use_token_type_ids: _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' _lowercase : Dict = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs _lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax 
class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' A_ = True A_ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Tuple = FlaxRoFormerModelTester(self ) @slow def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: _lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ ) _lowercase : str = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase_ ) @require_flax class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : List[str] ) -> List[Any]: '''simple docstring''' _lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) _lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] ) _lowercase : int = model(UpperCamelCase_ )[0] _lowercase : Union[str, Any] = 5_0000 _lowercase : str = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCamelCase_ ) _lowercase : int = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
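# A hedged sketch of the masked-LM inference the slow test above performs,
# assuming the de-obfuscated class name `FlaxRoFormerForMaskedLM` (the test's
# expected logits slice implies a vocab size of 50000):
import jax.numpy as jnp

from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
logits = model(input_ids)[0]
print(logits.shape)  # (1, 6, 50000) per the assertions in the test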
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Deduplicate entries in the model doc TOC and sort them by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Clean each modality sub-section in turn
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
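# The checker above is normally run from the repository root; the script path
# below is an assumption based on where transformers keeps similar utilities:
#
#   python utils/check_doc_toc.py                      # fail if the TOC is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place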
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _A : Optional[int] =logging.get_logger(__name__) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = ["""input_features""", """is_longer"""] def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict: '''simple docstring''' super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : Tuple = top_db _lowercase : Any = truncation _lowercase : str = padding _lowercase : int = fft_window_size _lowercase : Any = (fft_window_size >> 1) + 1 _lowercase : int = hop_length _lowercase : Any = max_length_s _lowercase : str = max_length_s * sampling_rate _lowercase : Any = sampling_rate _lowercase : List[Any] = frequency_min _lowercase : Tuple = frequency_max _lowercase : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , ) _lowercase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , ) def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]: '''simple docstring''' _lowercase : Tuple = copy.deepcopy(self.__dict__ ) _lowercase : int = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray: '''simple docstring''' _lowercase : List[str] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , ) return log_mel_spectrogram.T def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk _lowercase : int = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk _lowercase : Union[str, Any] = [0] # randomly choose index for each part _lowercase : Tuple = np.random.choice(ranges[0] ) _lowercase : int = np.random.choice(ranges[1] ) _lowercase : Any = np.random.choice(ranges[2] ) _lowercase : int = mel[idx_front : idx_front + chunk_frames, :] _lowercase : int = 
mel[idx_middle : idx_middle + chunk_frames, :] _lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :] _lowercase : List[Any] = torch.tensor(mel[None, None, :] ) _lowercase : Optional[int] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ ) _lowercase : str = mel_shrink[0][0].numpy() _lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array: '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": _lowercase : Tuple = True # random crop to max_length (for compatibility) -> this should be handled by self.pad _lowercase : Any = len(UpperCamelCase_ ) - max_length _lowercase : Dict = np.random.randint(0 , overflow + 1 ) _lowercase : Optional[int] = waveform[idx : idx + max_length] _lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) _lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed _lowercase : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. _lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 ) _lowercase : List[Any] = False else: _lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : int = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: _lowercase : Any = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": _lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) ) _lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": _lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) ) _lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) ) _lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": _lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) _lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature: '''simple docstring''' _lowercase : Dict = truncation if truncation is not None else self.truncation _lowercase : int = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _lowercase : List[str] = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ): _lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa ) elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowercase : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowercase : int = [np.asarray(UpperCamelCase_ )] # convert to mel spectrogram, truncate and pad if needed. 
_lowercase : Optional[Any] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ ) for waveform in raw_speech ] _lowercase : List[Any] = [] _lowercase : Dict = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_ ) is_longer.append(UpperCamelCase_ ) if truncation == "fusion" and sum(UpperCamelCase_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer _lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) ) _lowercase : str = True if isinstance(input_mel[0] , UpperCamelCase_ ): _lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool _lowercase : Tuple = [[longer] for longer in is_longer] _lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer} _lowercase : Optional[int] = BatchFeature(UpperCamelCase_ ) if return_tensors is not None: _lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ ) return input_features
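A minimal usage sketch for the feature extractor above, assuming it is exported as ClapFeatureExtractor; the checkpoint name and the synthetic 12-second waveform are illustrative, not from the original source.

import numpy as np
from transformers import ClapFeatureExtractor

# Checkpoint name is an assumption; any CLAP checkpoint should behave the same way.
feature_extractor = ClapFeatureExtractor.from_pretrained('laion/clap-htsat-unfused')
waveform = np.zeros(48_000 * 12, dtype=np.float32)  # 12 s of silence at 48 kHz
# 12 s exceeds the default 10 s max_length, so the "fusion" truncation path runs.
features = feature_extractor(waveform, sampling_rate=48_000, return_tensors='np')
print(features['input_features'].shape, features['is_longer'])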
'''simple docstring'''
import math


def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)

    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of blocks of Proth numbers to generate before indexing.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f'''ValueError: there is no {number}th Proth number''')
            continue
        print(f'''The {number}th Proth number: {value}''')
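A quick sanity check of proth against the first terms of the Proth sequence (values follow OEIS A080075):

# The n-th Proth number for n = 1..7.
assert [proth(n) for n in range(1, 8)] == [3, 5, 9, 13, 17, 25, 33]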
'''simple docstring'''
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return '\n'.join('* [{title}]({url})'.format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
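The helpers above hit the live Hacker News API, so output changes over time; a small smoke test:

# Requires network access; prints the current top three story titles.
for story in hackernews_top_stories(3):
    print(story.get('title'), '->', story.get('url', '(no url)'))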
'''simple docstring''' import argparse from collections import defaultdict def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int: _lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}''' done_test[_id] += 1 with open(_lowercase, 'r' ) as f: _lowercase : Optional[int] = f.readlines() _lowercase : Dict = f'''class {class_name}(''' _lowercase : List[Any] = f'''{4 * " "}def {test_name}(''' _lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}''' _lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}''' _lowercase : Dict = False _lowercase : str = False _lowercase : List[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = 0 _lowercase : Tuple = 0 _lowercase : Optional[int] = [] for line in lines: if line.startswith(_lowercase ): _lowercase : int = True elif in_class and line.startswith(_lowercase ): _lowercase : List[Any] = True elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )): _lowercase : str = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : List[Any] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(f'''{spaces * " "}{correct_line}''' ) _lowercase : Any = False else: new_lines.append(_lowercase ) with open(_lowercase, 'w' ) as f: for line in new_lines: f.write(_lowercase ) def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]: if fail is not None: with open(_lowercase, 'r' ) as f: _lowercase : Any = {l.strip() for l in f.readlines()} else: _lowercase : str = None with open(_lowercase, 'r' ) as f: _lowercase : str = f.readlines() _lowercase : Union[str, Any] = defaultdict(_lowercase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) if __name__ == "__main__": _A : str =argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) _A : Union[str, Any] =parser.parse_args() main(args.correct_filename, args.fail_filename)
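The script above patches expected-value lines inside test files; a sketch of the input format it parses (from the `line.split(';')` unpacking) and a hypothetical invocation, where the script file name is an assumption:

# Each line of --correct_filename carries four semicolon-separated fields:
#
#   path/to/test_file.py;SomeModelTest;test_logits;expected_slice = torch.tensor([...])
#
# and a run would look like:
#
#   python overwrite_expected.py --correct_filename correct_lines.txt --fail_filename failures.txt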
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Dict =logging.get_logger(__name__) _A : Dict ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """megatron-bert""" def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Dict = vocab_size _lowercase : Any = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Dict = num_attention_heads _lowercase : Dict = hidden_act _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : Optional[Any] = initializer_range _lowercase : List[str] = layer_norm_eps _lowercase : List[Any] = position_embedding_type _lowercase : Optional[Any] = use_cache
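A short sketch instantiating the config above; the keyword values simply echo its defaults.

# MegatronBertConfig is the class defined above.
config = MegatronBertConfig(vocab_size=29_056, hidden_size=1024, num_hidden_layers=24)
print(config.model_type)  # 'megatron-bert'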
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowerCamelCase__ : '''simple docstring''' A_ = BlenderbotSmallConfig A_ = {} A_ = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Any=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : str=99 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Union[str, Any]=37 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Dict=20 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Any=1 , UpperCamelCase_ : Optional[int]=0 , ) -> str: '''simple docstring''' _lowercase : Tuple = parent _lowercase : Optional[Any] = batch_size _lowercase : Union[str, Any] = seq_length _lowercase : Dict = is_training _lowercase : List[str] = use_labels _lowercase : int = vocab_size _lowercase : Any = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Optional[int] = num_attention_heads _lowercase : Any = intermediate_size _lowercase : str = hidden_dropout_prob _lowercase : Tuple = attention_probs_dropout_prob _lowercase : Optional[Any] = max_position_embeddings _lowercase : str = eos_token_id _lowercase : List[str] = pad_token_id _lowercase : str = bos_token_id def __UpperCAmelCase ( self : int ) -> List[Any]: '''simple docstring''' _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowercase : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowercase : Dict = tf.concat([input_ids, eos_tensor] , axis=1 ) _lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _lowercase : int = prepare_blenderbot_small_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return config, inputs_dict def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = TFBlenderbotSmallModel(config=UpperCamelCase_ ).get_decoder() _lowercase : str = inputs_dict['input_ids'] _lowercase : Union[str, Any] = input_ids[:1, :] _lowercase : List[str] = inputs_dict['attention_mask'][:1, :] _lowercase 
: Optional[Any] = inputs_dict['head_mask'] _lowercase : Optional[int] = 1 # first forward pass _lowercase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ ) _lowercase , _lowercase : Tuple = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowercase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowercase : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _lowercase : str = tf.concat([input_ids, next_tokens] , axis=-1 ) _lowercase : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _lowercase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0] _lowercase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _lowercase : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _lowercase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx] _lowercase : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 ) def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase=None, _lowercase=None, _lowercase=None, _lowercase=None, _lowercase=None, ) -> Tuple: if attention_mask is None: _lowercase : Any = tf.cast(tf.math.not_equal(_lowercase, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: _lowercase : Optional[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: _lowercase : str = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowercase : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowercase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): '''simple docstring''' A_ = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) A_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () A_ = ( { """conversational""": TFBlenderbotSmallForConditionalGeneration, """feature-extraction""": TFBlenderbotSmallModel, """summarization""": TFBlenderbotSmallForConditionalGeneration, """text2text-generation""": TFBlenderbotSmallForConditionalGeneration, """translation""": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) A_ = True A_ = False A_ = False def __UpperCAmelCase ( self : int ) -> List[str]: '''simple docstring''' _lowercase : Union[str, Any] = TFBlenderbotSmallModelTester(self ) _lowercase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _lowercase : str = 
self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ ) @require_tokenizers @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' A_ = [ """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """ """ i'm going to throw up.\nand why is that?""" ] A_ = """facebook/blenderbot_small-90M""" @cached_property def __UpperCAmelCase ( self : Optional[int] ) -> Any: '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) @cached_property def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' _lowercase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' _lowercase : Optional[Any] = self.tokenizer(self.src_text , return_tensors='tf' ) _lowercase : List[Any] = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase_ , ) _lowercase : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase_ )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
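The integration test above is gated behind @slow, so it only runs when explicitly enabled; a hypothetical invocation, where the test file path is an assumption:

#   RUN_SLOW=1 pytest tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py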
'''simple docstring''' import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def __UpperCamelCase ( _lowercase ) -> List[Any]: _lowercase : Tuple = args.pruning_method _lowercase : int = args.threshold _lowercase : str = args.model_name_or_path.rstrip('/' ) _lowercase : Dict = args.target_model_path print(f'''Load fine-pruned model from {model_name_or_path}''' ) _lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) ) _lowercase : List[Any] = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _lowercase : Optional[int] = tensor print(f'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: _lowercase : List[str] = tensor print(f'''Copied layer {name}''' ) elif "bias" in name: _lowercase : Dict = tensor print(f'''Copied layer {name}''' ) else: if pruning_method == "magnitude": _lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase ) _lowercase : Optional[Any] = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue _lowercase : Optional[Any] = name[:-6] _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores'''] _lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase ) _lowercase : str = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _lowercase : str = name[:-6] _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores'''] _lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase ) _lowercase : Optional[int] = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue _lowercase : Optional[int] = name[:-6] _lowercase : List[str] = model[f'''{prefix_}mask_scores'''] _lowercase , _lowercase : Union[str, Any] = -0.1, 1.1 _lowercase : str = torch.sigmoid(_lowercase ) _lowercase : int = s * (r - l) + l _lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 ) _lowercase : Union[str, Any] = tensor * mask print(f'''Pruned layer {name}''' ) else: raise ValueError('Unknown pruning method' ) if target_model_path is None: _lowercase : List[Any] = os.path.join( os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' ) if not os.path.isdir(_lowercase ): shutil.copytree(_lowercase, _lowercase ) print(f'''\nCreated folder {target_model_path}''' ) torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) ) print('\nPruned model saved! See you later!' 
) if __name__ == "__main__": _A : Union[str, Any] =argparse.ArgumentParser() parser.add_argument( '''--pruning_method''', choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''], type=str, required=True, help=( '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,''' ''' sigmoied_threshold = Soft movement pruning)''' ), ) parser.add_argument( '''--threshold''', type=float, required=False, help=( '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.''' '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.''' '''Not needed for `l0`''' ), ) parser.add_argument( '''--model_name_or_path''', type=str, required=True, help='''Folder containing the model that was previously fine-pruned''', ) parser.add_argument( '''--target_model_path''', default=None, type=str, required=False, help='''Folder containing the model that was previously fine-pruned''', ) _A : List[Any] =parser.parse_args() main(args)
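A hypothetical invocation of the pruning script above (script and checkpoint paths are assumptions); when --target_model_path is omitted it writes a bertarized_* sibling folder next to the input model:

#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path serialization_dir/fine_pruned_model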
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _A : Any ={'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Optional[Any] =['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys _A : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations

B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'


# The two function names below were stripped in the source; base64_encode and
# base64_decode are reconstructions.
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = ''.join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b'=' * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make
        # its length a multiple of 6.
        binary_stream += '0' * (6 - len(binary_stream) % 6)
    else:
        padding = b''

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        ''.join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str | bytes) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters')

    padding = encoded_data.count('=')

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
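A round-trip check of the reconstructed helpers against the standard library:

import base64

payload = b'lights out'
encoded = base64_encode(payload)
assert encoded == base64.b64encode(payload)  # agrees with the stdlib encoder
assert base64_decode(encoded) == payload     # and decodes back to the input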
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __UpperCamelCase ( _lowercase ) -> Optional[Any]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase_ : nn.Module , UpperCamelCase_ : int ) -> int: '''simple docstring''' super().__init__() _lowercase : Dict = module _lowercase : Optional[Any] = nn.Sequential( nn.Linear(module.in_features , UpperCamelCase_ , bias=UpperCamelCase_ ) , nn.Linear(UpperCamelCase_ , module.out_features , bias=UpperCamelCase_ ) , ) _lowercase : Union[str, Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase_ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def __UpperCAmelCase ( self : Any , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[int] ) -> List[str]: '''simple docstring''' return self.module(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) + self.adapter(UpperCamelCase_ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' A_ = """bigscience/bloom-1b7""" # Constant values A_ = 2.1_09_65_95_52_69_25_74 A_ = """Hello my name is""" A_ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) A_ = 10 def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple: '''simple docstring''' _lowercase : List[Any] = AutoTokenizer.from_pretrained(self.model_name ) class lowerCamelCase__ ( A ): '''simple docstring''' def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' super().setUp() # Models and tokenizer _lowercase : Union[str, Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) _lowercase : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' ) def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : Any = self.model_abit.config self.assertTrue(hasattr(UpperCamelCase_ , 'quantization_config' ) ) _lowercase : int = config.to_dict() _lowercase : Union[str, Any] = config.to_diff_dict() _lowercase : Optional[Any] = config.to_json_string() def __UpperCAmelCase ( self : List[Any] ) -> Tuple: '''simple docstring''' from bitsandbytes.nn import Paramsabit _lowercase : Any = self.model_fpaa.get_memory_footprint() _lowercase : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) _lowercase : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def __UpperCAmelCase ( self : List[Any] ) -> Tuple: '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(UpperCamelCase_ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' _lowercase : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' ) _lowercase : List[Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase_ ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self : Dict ) -> List[Any]: '''simple docstring''' _lowercase : Tuple = BitsAndBytesConfig() _lowercase : Optional[int] = True _lowercase : Union[str, Any] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase_ , device_map='auto' ) _lowercase : str = self.tokenizer(self.input_text , return_tensors='pt' ) _lowercase : Tuple = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase_ ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self : Dict ) -> int: '''simple docstring''' with self.assertRaises(UpperCamelCase_ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(UpperCamelCase_ ) def __UpperCAmelCase ( self : str ) -> Union[str, Any]: '''simple docstring''' _lowercase : int = BitsAndBytesConfig() with 
self.assertRaises(UpperCamelCase_ ): _lowercase : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase_ , load_in_abit=UpperCamelCase_ , device_map='auto' , bnb_abit_quant_type='nf4' , ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' with self.assertRaises(UpperCamelCase_ ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(UpperCamelCase_ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(UpperCamelCase_ ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with self.assertRaises(UpperCamelCase_ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(UpperCamelCase_ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything _lowercase : int = self.tokenizer(self.input_text , return_tensors='pt' ) _lowercase : Union[str, Any] = self.model_fpaa.to(torch.floataa ) _lowercase : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error _lowercase : Tuple = self.model_fpaa.to('cpu' ) # Check this does not throw an error _lowercase : Dict = self.model_fpaa.half() # Check this does not throw an error _lowercase : Tuple = self.model_fpaa.float() def __UpperCAmelCase ( self : Optional[int] ) -> Any: '''simple docstring''' _lowercase : Tuple = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=UpperCamelCase_ , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @classmethod def __UpperCAmelCase ( cls : Tuple ) -> int: '''simple docstring''' _lowercase : Optional[int] = 't5-small' _lowercase : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense _lowercase : Optional[int] = AutoTokenizer.from_pretrained(cls.model_name ) _lowercase : Union[str, Any] = 'Translate in German: Hello, my dog is cute' def __UpperCAmelCase ( self : Tuple ) -> List[str]: '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : Dict ) -> Any: '''simple docstring''' from transformers import TaForConditionalGeneration _lowercase : Union[str, Any] = TaForConditionalGeneration._keep_in_fpaa_modules _lowercase : Optional[Any] = None # test with `t5-small` _lowercase : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' ) _lowercase : Any = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) _lowercase : Optional[Any] = model.generate(**UpperCamelCase_ ) # test with `flan-t5-small` _lowercase : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase_ , device_map='auto' ) _lowercase : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) _lowercase : int = model.generate(**UpperCamelCase_ ) _lowercase : Optional[Any] = modules def __UpperCAmelCase ( self : List[str] ) -> Tuple: '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` _lowercase : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed 
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) _lowercase : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) _lowercase : List[Any] = model.generate(**UpperCamelCase_ ) # test with `flan-t5-small` _lowercase : List[str] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase_ , device_map='auto' ) _lowercase : Any = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) _lowercase : List[Any] = model.generate(**UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' def __UpperCAmelCase ( self : int ) -> int: '''simple docstring''' super().setUp() # model_name _lowercase : str = 'bigscience/bloom-560m' _lowercase : str = 't5-small' # Different types of model _lowercase : Tuple = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' ) # Sequence classification model _lowercase : Optional[int] = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' ) # CausalLM model _lowercase : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' ) # Seq2seq model _lowercase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=UpperCamelCase_ , device_map='auto' ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : Dict ) -> Any: '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class lowerCamelCase__ ( A ): '''simple docstring''' def __UpperCAmelCase ( self : Tuple ) -> Dict: '''simple docstring''' super().setUp() def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : List[str] ) -> List[str]: '''simple docstring''' _lowercase : str = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass _lowercase : int = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class lowerCamelCase__ ( A ): '''simple docstring''' def __UpperCAmelCase ( self : int ) -> List[Any]: '''simple docstring''' super().setUp() def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _lowercase : List[str] = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=UpperCamelCase_ , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model _lowercase : int = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch _lowercase : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , 
max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase_ ) , self.EXPECTED_OUTPUTS ) class lowerCamelCase__ ( A ): '''simple docstring''' def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' _lowercase : Union[str, Any] = 'facebook/opt-350m' super().setUp() def __UpperCAmelCase ( self : List[str] ) -> Tuple: '''simple docstring''' if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters _lowercase : Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): _lowercase : List[Any] = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability _lowercase : int = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(UpperCamelCase_ ) ): _lowercase : Optional[Any] = LoRALayer(module.q_proj , rank=16 ) _lowercase : Optional[int] = LoRALayer(module.k_proj , rank=16 ) _lowercase : Tuple = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch _lowercase : Dict = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): _lowercase : List[str] = model.forward(**UpperCamelCase_ ) out.logits.norm().backward() for module in model.modules(): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(UpperCamelCase_ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """gpt2-xl""" A_ = 3.31_91_85_48_54_15_21_87
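These 4-bit tests need a CUDA GPU plus the accelerate and bitsandbytes packages; loading a model the way they do looks roughly like this (the checkpoint is the small one the tests also use):

from transformers import AutoModelForCausalLM

# Sketch of the load path the tests exercise; requires a CUDA-capable GPU.
model = AutoModelForCausalLM.from_pretrained(
    'bigscience/bloom-560m', load_in_4bit=True, device_map='auto'
)
print(model.get_memory_footprint())  # well below the fp16 footprint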
'''simple docstring'''


def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 1_0000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        current = num
        while iterations < 50:
            current = sum_reverse(current)
            iterations += 1
            if is_palindrome(current):
                break
        else:
            # No palindrome reached within 50 iterations: a Lychrel candidate.
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f'''{solution() = }''')
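A worked check of the helpers above: 47 + 74 = 121 is a palindrome after one step, so 47 is not a Lychrel candidate, while numbers such as 196 survive all 50 iterations and are counted.

assert is_palindrome(sum_reverse(47))  # 47 + 74 = 121
assert solution() == 249               # the Project Euler 55 count below 10_000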
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : str ) -> str: '''simple docstring''' _lowercase : Union[str, Any] = tempfile.mkdtemp() # fmt: off _lowercase : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on _lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) _lowercase : List[Any] = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } _lowercase : Any = os.path.join(self.tmpdirname , UpperCamelCase_ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : Any , **UpperCamelCase_ : List[str] ) -> Optional[int]: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[Any] , **UpperCamelCase_ : Dict ) -> Union[str, Any]: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : Tuple ) -> Any: '''simple docstring''' _lowercase : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _lowercase : Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' _lowercase : Union[str, Any] = self.get_tokenizer() _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor.save_pretrained(self.tmpdirname ) _lowercase : str = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: '''simple docstring''' _lowercase : str = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowercase : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _lowercase : Tuple = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 ) _lowercase : int = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , 
bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] ) -> Tuple: '''simple docstring''' _lowercase : Union[str, Any] = self.get_image_processor() _lowercase : Dict = self.get_tokenizer() _lowercase : List[str] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) _lowercase : Union[str, Any] = self.prepare_image_inputs() _lowercase : Any = image_processor(UpperCamelCase_ , return_tensors='np' ) _lowercase : str = processor(images=UpperCamelCase_ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self : Dict ) -> Optional[int]: '''simple docstring''' _lowercase : Tuple = self.get_image_processor() _lowercase : Dict = self.get_tokenizer() _lowercase : Dict = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) _lowercase : int = 'lower newer' _lowercase : Tuple = processor(text=UpperCamelCase_ ) _lowercase : Union[str, Any] = tokenizer(UpperCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self : Tuple ) -> Tuple: '''simple docstring''' _lowercase : Tuple = self.get_image_processor() _lowercase : str = self.get_tokenizer() _lowercase : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) _lowercase : List[Any] = 'lower newer' _lowercase : Optional[int] = self.prepare_image_inputs() _lowercase : Dict = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(UpperCamelCase_ ): processor() def __UpperCAmelCase ( self : str ) -> Any: '''simple docstring''' _lowercase : Tuple = self.get_image_processor() _lowercase : str = self.get_tokenizer() _lowercase : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) _lowercase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase : Union[str, Any] = processor.batch_decode(UpperCamelCase_ ) _lowercase : List[str] = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> Dict: '''simple docstring''' _lowercase : str = self.get_image_processor() _lowercase : Tuple = self.get_tokenizer() _lowercase : str = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) _lowercase : Any = 'lower newer' _lowercase : str = self.prepare_image_inputs() _lowercase : Dict = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
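What these tests exercise, in miniature: any vision image processor can be paired with any text tokenizer inside one processor (checkpoints illustrative):

from transformers import BertTokenizerFast, VisionTextDualEncoderProcessor, ViTImageProcessor

processor = VisionTextDualEncoderProcessor(
    image_processor=ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k'),
    tokenizer=BertTokenizerFast.from_pretrained('bert-base-uncased'),
)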
'''simple docstring''' from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowerCamelCase__ ( A ): '''simple docstring''' @slow @require_torch def __UpperCAmelCase ( self : Any ) -> Any: '''simple docstring''' _lowercase : List[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' ) _lowercase : Tuple = BertTokenizer.from_pretrained('bert-base-uncased' ) _lowercase : Optional[Any] = bertabert.config.encoder.vocab_size _lowercase : Optional[Any] = tokenizer.sep_token_id _lowercase : Any = tokenizer.cls_token_id _lowercase : Tuple = 128 _lowercase : Any = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' ) _lowercase : Tuple = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' ) _lowercase : Any = train_dataset.select(range(32 ) ) _lowercase : str = val_dataset.select(range(16 ) ) _lowercase : Optional[Any] = 4 def _map_to_encoder_decoder_inputs(UpperCamelCase_ : Dict ): # Tokenizer will automatically set [BOS] <text> [EOS] _lowercase : int = tokenizer(batch['article'] , padding='max_length' , truncation=UpperCamelCase_ , max_length=512 ) _lowercase : List[str] = tokenizer(batch['highlights'] , padding='max_length' , truncation=UpperCamelCase_ , max_length=128 ) _lowercase : Dict = inputs.input_ids _lowercase : str = inputs.attention_mask _lowercase : List[Any] = outputs.input_ids _lowercase : Optional[Any] = outputs.input_ids.copy() _lowercase : Optional[int] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] _lowercase : Any = outputs.attention_mask assert all(len(UpperCamelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCamelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCamelCase_ : Union[str, Any] ): _lowercase : int = pred.label_ids _lowercase : Tuple = pred.predictions # all unnecessary tokens are removed _lowercase : Optional[int] = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) _lowercase : List[Any] = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) _lowercase : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ ) return {"accuracy": accuracy} # map train dataset _lowercase : Union[str, Any] = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=['article', 'highlights'] , ) train_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) # same for validation dataset _lowercase : Union[str, Any] = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=['article', 'highlights'] , ) val_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) _lowercase : Dict = self.get_auto_remove_tmp_dir() _lowercase : Union[str, Any] = SeqaSeqTrainingArguments( output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy='steps' , do_train=UpperCamelCase_ 
, do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer _lowercase : List[Any] = SeqaSeqTrainer( model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , ) # start training trainer.train()
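The test above follows the standard warm-started encoder-decoder recipe; the core wiring outside the test harness is roughly the following (checkpoints as in the test, token wiring hedged since the degraded assignments hide the exact attribute names):

from transformers import BertTokenizer, EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny', 'prajjwal1/bert-tiny')
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id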
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax _A : Optional[int] =logging.get_logger(__name__) @add_end_docstrings(A ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int: '''simple docstring''' super().__init__(**UpperCamelCase_ ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]: '''simple docstring''' return super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = {} if "candidate_labels" in kwargs: _lowercase : Union[str, Any] = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: _lowercase : int = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]: '''simple docstring''' _lowercase : Dict = load_image(UpperCamelCase_ ) _lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) _lowercase : Optional[Any] = candidate_labels _lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels] _lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ ) _lowercase : Any = [text_inputs] return inputs def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = model_inputs.pop('candidate_labels' ) _lowercase : List[str] = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , UpperCamelCase_ ): _lowercase : Optional[int] = text_inputs[0] else: # Batching case. 
_lowercase : List[str] = text_inputs[0][0] _lowercase : Optional[Any] = self.model(**text_inputs , **model_inputs ) _lowercase : Optional[Any] = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = model_outputs.pop('candidate_labels' ) _lowercase : Optional[int] = model_outputs['logits'][0] if self.framework == "pt": _lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 ) _lowercase : Tuple = probs.tolist() if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : List[Any] = [scores] elif self.framework == "tf": _lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 ) _lowercase : List[Any] = probs.numpy().tolist() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _lowercase : List[Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda x : -x[0] ) ] return result
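A hedged usage sketch for the pipeline class above, through the public pipeline factory; the checkpoint and image path are illustrative:

from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
)
preds = classifier(
    "photo.jpg",  # a local path, URL, or PIL.Image
    candidate_labels=["cat", "dog"],
    hypothesis_template="This is a photo of {}.",  # the default used in preprocess()
)
# preds is a list of {"score": ..., "label": ...} dicts sorted by descending score.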
4
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer _A : str ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _A : str ={ '''vocab_file''': { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''unc-nlp/lxmert-base-uncased''': ( '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json''' ), }, } _A : Any ={ '''unc-nlp/lxmert-base-uncased''': 5_1_2, } _A : List[str] ={ '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True}, } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_INIT_CONFIGURATION A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = LxmertTokenizer def __init__( self : int , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : str="[UNK]" , UpperCamelCase_ : Optional[Any]="[SEP]" , UpperCamelCase_ : List[str]="[PAD]" , UpperCamelCase_ : int="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Optional[Any] , ) -> Optional[int]: '''simple docstring''' super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('strip_accents' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('handle_chinese_chars' , UpperCamelCase_ ) != tokenize_chinese_chars ): _lowercase : str = getattr(UpperCamelCase_ , normalizer_state.pop('type' ) ) _lowercase : Dict = do_lower_case _lowercase : Optional[int] = strip_accents _lowercase : List[Any] = tokenize_chinese_chars _lowercase : str = normalizer_class(**UpperCamelCase_ ) _lowercase : Optional[int] = do_lower_case def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict=None ) -> Optional[Any]: '''simple docstring''' _lowercase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCAmelCase ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _lowercase : Union[str, Any] = [self.sep_token_id] _lowercase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' _lowercase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
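A short sketch of how the two special-token methods above compose a sentence pair (checkpoint name taken from the pretrained maps in this file):

from transformers import LxmertTokenizerFast

tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
enc = tok("a cat on a mat", "what animal is this?")
# build_inputs_with_special_tokens -> [CLS] A [SEP] B [SEP]
# create_token_type_ids_from_sequences -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])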
4
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def __UpperCamelCase ( _lowercase ) -> None: _lowercase , _lowercase : List[Any] = analyze_text(_lowercase ) _lowercase : Any = list(' ' + ascii_lowercase ) # what is our total sum of probabilities. _lowercase : Union[str, Any] = sum(single_char_strings.values() ) # one length string _lowercase : Union[str, Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: _lowercase : Any = single_char_strings[ch] _lowercase : int = my_str / all_sum my_fir_sum += prob * math.log2(_lowercase ) # entropy formula. # print entropy print(f'''{round(-1 * my_fir_sum ):.1f}''' ) # two len string _lowercase : str = sum(two_char_strings.values() ) _lowercase : str = 0 # for each alpha pair (two in size) calculate entropy. for cha in my_alphas: for chb in my_alphas: _lowercase : Optional[Any] = cha + chb if sequence in two_char_strings: _lowercase : int = two_char_strings[sequence] _lowercase : Optional[int] = int(_lowercase ) / all_sum my_sec_sum += prob * math.log2(_lowercase ) # print second entropy print(f'''{round(-1 * my_sec_sum ):.1f}''' ) # print the difference between them print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' ) def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]: _lowercase : Optional[Any] = Counter() # type: ignore _lowercase : List[Any] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0, len(_lowercase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def __UpperCamelCase ( ) -> List[Any]: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
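A worked instance of the first-order formula the script prints, H1 = -sum over c of p(c) * log2 p(c):

import math
from collections import Counter

text = "abab"
counts = Counter(text)
total = sum(counts.values())
# Two symbols, each with p = 0.5, so H1 = 1.0 bit per character.
h_one = -sum((n / total) * math.log2(n / total) for n in counts.values())
print(f"{h_one:.1f}")  # 1.0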
4
1
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _A : Optional[int] =logging.get_logger(__name__) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = ["""input_features""", """is_longer"""] def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict: '''simple docstring''' super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : Tuple = top_db _lowercase : Any = truncation _lowercase : str = padding _lowercase : int = fft_window_size _lowercase : Any = (fft_window_size >> 1) + 1 _lowercase : int = hop_length _lowercase : Any = max_length_s _lowercase : str = max_length_s * sampling_rate _lowercase : Any = sampling_rate _lowercase : List[Any] = frequency_min _lowercase : Tuple = frequency_max _lowercase : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , ) _lowercase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , ) def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]: '''simple docstring''' _lowercase : Tuple = copy.deepcopy(self.__dict__ ) _lowercase : int = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray: '''simple docstring''' _lowercase : List[str] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , ) return log_mel_spectrogram.T def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk _lowercase : int = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk _lowercase : Union[str, Any] = [0] # randomly choose index for each part _lowercase : Tuple = np.random.choice(ranges[0] ) _lowercase : int = np.random.choice(ranges[1] ) _lowercase : Any = np.random.choice(ranges[2] ) _lowercase : int = mel[idx_front : idx_front + chunk_frames, :] _lowercase : int = 
mel[idx_middle : idx_middle + chunk_frames, :] _lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :] _lowercase : List[Any] = torch.tensor(mel[None, None, :] ) _lowercase : Optional[int] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ ) _lowercase : str = mel_shrink[0][0].numpy() _lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array: '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": _lowercase : Tuple = True # random crop to max_length (for compatibility) -> this should be handled by self.pad _lowercase : Any = len(UpperCamelCase_ ) - max_length _lowercase : Dict = np.random.randint(0 , overflow + 1 ) _lowercase : Optional[int] = waveform[idx : idx + max_length] _lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) _lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed _lowercase : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. _lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 ) _lowercase : List[Any] = False else: _lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : int = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: _lowercase : Any = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": _lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) ) _lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": _lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) ) _lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) ) _lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": _lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) _lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature: '''simple docstring''' _lowercase : Dict = truncation if truncation is not None else self.truncation _lowercase : int = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _lowercase : List[str] = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ): _lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa ) elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowercase : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowercase : int = [np.asarray(UpperCamelCase_ )] # convert to mel spectrogram, truncate and pad if needed. 
_lowercase : Optional[Any] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ ) for waveform in raw_speech ] _lowercase : List[Any] = [] _lowercase : Dict = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_ ) is_longer.append(UpperCamelCase_ ) if truncation == "fusion" and sum(UpperCamelCase_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer _lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) ) _lowercase : str = True if isinstance(input_mel[0] , UpperCamelCase_ ): _lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool _lowercase : Tuple = [[longer] for longer in is_longer] _lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer} _lowercase : Optional[int] = BatchFeature(UpperCamelCase_ ) if return_tensors is not None: _lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ ) return input_features
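A hedged usage sketch for the extractor above; the checkpoint name is an assumption (any CLAP checkpoint shipping this feature extractor config would do):

import numpy as np
from transformers import ClapFeatureExtractor

fe = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
# 12 s at 48 kHz exceeds max_length_s = 10, so "fusion" truncation stacks
# four mel views: a shrunk global view plus front/middle/back chunks.
audio = np.random.randn(12 * 48_000).astype(np.float32)
feats = fe(audio, sampling_rate=48_000, truncation="fusion", return_tensors="pt")
print(feats["input_features"].shape, feats["is_longer"])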
4
'''simple docstring''' import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : int ) -> Union[str, Any]: '''simple docstring''' _lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' ) _lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' ) _lowercase : List[Any] = 'The dog is cute and lives in the garden house' _lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] ) _lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim _lowercase : Tuple = jnp.array( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) _lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state'] self.assertEqual(output.shape , UpperCamelCase_ ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
4
1
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _A : Union[str, Any] ={'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys _A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _A : int =logging.get_logger(__name__) _A : Union[str, Any] ={ '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''', } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip_vision_model""" def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any: '''simple docstring''' super().__init__(**UpperCamelCase_ ) _lowercase : Optional[Any] = hidden_size _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = num_hidden_layers _lowercase : str = num_attention_heads _lowercase : Tuple = patch_size _lowercase : Dict = image_size _lowercase : Optional[int] = initializer_range _lowercase : List[Any] = attention_dropout _lowercase : int = layer_norm_eps _lowercase : Optional[int] = hidden_act _lowercase : str = qkv_bias @classmethod def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase_ ) _lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": _lowercase : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip_qformer""" def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Dict = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : List[Any] = num_attention_heads _lowercase : Optional[int] = hidden_act _lowercase : Union[str, Any] = intermediate_size _lowercase : List[Any] = hidden_dropout_prob _lowercase : Dict = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : Optional[int] = initializer_range _lowercase : Tuple = layer_norm_eps _lowercase : List[str] = position_embedding_type _lowercase : str = cross_attention_frequency _lowercase : int = encoder_hidden_size @classmethod def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase_ ) _lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": _lowercase : Optional[int] = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip""" A_ = True def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' super().__init__(**UpperCamelCase_ ) if vision_config is None: _lowercase : Any = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: _lowercase : List[Any] = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: _lowercase : List[Any] = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) _lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ ) _lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ ) _lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt' _lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ ) _lowercase : str = self.text_config.tie_word_embeddings _lowercase : int = self.text_config.is_encoder_decoder _lowercase : Tuple = num_query_tokens _lowercase : str = self.vision_config.hidden_size _lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowercase : List[Any] = 1.0 _lowercase : int = 0.02 @classmethod def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , ) def __UpperCAmelCase ( self : Dict ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = copy.deepcopy(self.__dict__ ) _lowercase : Optional[int] = self.vision_config.to_dict() _lowercase : Optional[Any] = self.qformer_config.to_dict() _lowercase : Tuple = self.text_config.to_dict() _lowercase : Dict = self.__class__.model_type return output
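A sketch of composing the three sub-configs through the classmethod defined above; in the released API it is named from_vision_qformer_text_configs (the placeholder method names in this dump obscure that):

from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

config = InstructBlipConfig.from_vision_qformer_text_configs(
    vision_config=InstructBlipVisionConfig(),
    qformer_config=InstructBlipQFormerConfig(),
    text_config=OPTConfig(),  # 'opt' is the default text model type above
)
print(config.num_query_tokens, config.qformer_config.encoder_hidden_size)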
4
1
'''simple docstring''' import unittest from knapsack import greedy_knapsack as kp class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : int ) -> Any: '''simple docstring''' _lowercase : List[Any] = [10, 20, 30, 40, 50, 60] _lowercase : Tuple = [2, 4, 6, 8, 10, 12] _lowercase : Optional[Any] = 100 self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 ) def __UpperCAmelCase ( self : int ) -> int: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' ) def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'Weight can not be negative.' ) def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'Profit can not be negative.' ) def __UpperCAmelCase ( self : int ) -> List[str]: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' ) def __UpperCAmelCase ( self : int ) -> List[Any]: '''simple docstring''' self.assertRaisesRegex( UpperCamelCase_ , 'The length of profit and weight must be same.' ) if __name__ == "__main__": unittest.main()
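The call the first test exercises, in isolation (module path as imported above); all six items share the profit/weight ratio 5 and their total weight 42 fits under 100, so everything is taken:

from knapsack import greedy_knapsack as kp

profit = kp.calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100)
print(profit)  # 210, matching the assertion above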
4
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): _A : List[str] ='''pt''' elif is_tf_available(): _A : Tuple ='''tf''' else: _A : Optional[int] ='''jax''' class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' A_ = ByTaTokenizer A_ = False def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' super().setUp() _lowercase : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __UpperCAmelCase ( self : int ) -> int: '''simple docstring''' return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer: '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]: '''simple docstring''' _lowercase : Dict = [] for i in range(len(UpperCamelCase_ ) ): try: _lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) ) _lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) ) if max_length is not None and len(UpperCamelCase_ ) > max_length: _lowercase : List[Any] = toks[:max_length] if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0: while len(UpperCamelCase_ ) < min_length: _lowercase : Tuple = toks + toks # toks_str = [t[1] for t in toks] _lowercase : Dict = [t[0] for t in toks] # Ensure consistency _lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) if " " not in output_txt and len(UpperCamelCase_ ) > 1: _lowercase : Any = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ ) ) if with_prefix_space: _lowercase : Union[str, Any] = ' ' + output_txt _lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) return output_txt, output_ids def __UpperCAmelCase ( self : Optional[int] ) -> int: '''simple docstring''' _lowercase : List[str] = self.ta_base_tokenizer _lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) _lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' _lowercase : Optional[int] = self.ta_base_tokenizer _lowercase : Tuple = 'Unicode โ‚ฌ.' 
_lowercase : List[Any] = tokenizer(UpperCamelCase_ ) _lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , UpperCamelCase_ ) # decoding _lowercase : List[str] = tokenizer.decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , 'Unicode โ‚ฌ.</s>' ) _lowercase : Any = tokenizer('e รจ รฉ รช รซ' ) _lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , UpperCamelCase_ ) # decoding _lowercase : Tuple = tokenizer.decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , 'e รจ รฉ รช รซ</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e รจ รฉ รช รซ' ) ) , 'e รจ รฉ รช รซ</s>' ) def __UpperCAmelCase ( self : Tuple ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = self.ta_base_tokenizer _lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) if FRAMEWORK != "jax": _lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] ) else: _lowercase : List[str] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __UpperCAmelCase ( self : Optional[int] ) -> str: '''simple docstring''' _lowercase : Union[str, Any] = self.ta_base_tokenizer _lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , UpperCamelCase_ ) self.assertIn('attention_mask' , UpperCamelCase_ ) self.assertNotIn('decoder_input_ids' , UpperCamelCase_ ) self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ ) def __UpperCAmelCase ( self : Any ) -> int: '''simple docstring''' _lowercase : Tuple = self.ta_base_tokenizer _lowercase : Optional[Any] = [ 'Summary of the text.', 'Another summary.', ] _lowercase : str = tokenizer( text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __UpperCAmelCase ( self : Dict ) -> Tuple: '''simple docstring''' _lowercase : str = self.ta_base_tokenizer _lowercase : str = ['A long paragraph for summarization. </s>'] _lowercase : Optional[int] = ['Summary of the text. 
</s>'] # fmt: off _lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] ) self.assertEqual(UpperCamelCase_ , batch['labels'][0] ) def __UpperCAmelCase ( self : List[str] ) -> int: '''simple docstring''' _lowercase : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _lowercase : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : List[Any] = tempfile.mkdtemp() _lowercase : Any = ' He is very happy, UNwant\u00E9d,running' _lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) _lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ ) _lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) shutil.rmtree(UpperCamelCase_ ) _lowercase : str = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : Dict = tempfile.mkdtemp() _lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) _lowercase : Optional[int] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) _lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ ) _lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> Tuple: '''simple docstring''' _lowercase : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _lowercase : int = json.load(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 
encoding='utf-8' ) as json_file: _lowercase : Tuple = json.load(UpperCamelCase_ ) _lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )] _lowercase : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] _lowercase : int = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowercase : Optional[Any] = tokenizer_class.from_pretrained( UpperCamelCase_ , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )] _lowercase : Tuple = tokenizer_class.from_pretrained( UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __UpperCAmelCase ( self : List[str] ) -> str: '''simple docstring''' _lowercase : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase_ ) _lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __UpperCAmelCase ( self : Optional[int] ) -> int: '''simple docstring''' pass def __UpperCAmelCase ( self : str ) -> Tuple: '''simple docstring''' pass def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: '''simple docstring''' pass def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' pass def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] _lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] ) -> str: '''simple docstring''' _lowercase : Dict = self.get_tokenizers() for tokenizer in tokenizers: with 
self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowercase : Optional[int] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] _lowercase : Optional[int] = 0 _lowercase : int = tokenizer.convert_ids_to_tokens( UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) for attr in attributes_list: setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ ) setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ ) setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] ) setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
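The byte arithmetic behind the expected ids in these tests: ByT5 assigns id = utf8_byte + 3 (offsetting the pad/eos/unk specials), so the expectations can be derived by hand. A hedged check using the released class name behind the ByTa placeholder:

from transformers import ByT5Tokenizer

tok = ByT5Tokenizer.from_pretrained("google/byt5-small")
ids = tok("Unicode").input_ids
# 'U' = 85 -> 88, 'n' = 110 -> 113, ..., 'e' = 101 -> 104, then </s> = 1
print(ids)  # [88, 113, 108, 102, 114, 103, 104, 1]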
4
1
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) _A : List[str] ='''\ Text data. Second line of data.''' _A : Optional[int] ='''file''' @pytest.fixture(scope='session' ) def __UpperCamelCase ( _lowercase ) -> Dict: _lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd') _lowercase : Union[str, Any] = bytes(_lowercase, 'utf-8' ) with zstd.open(_lowercase, 'wb' ) as f: f.write(_lowercase ) return path @pytest.fixture def __UpperCamelCase ( _lowercase ) -> Any: with open(os.path.join(tmpfs.local_root_dir, _lowercase ), 'w' ) as f: f.write(_lowercase ) return FILE_PATH @pytest.mark.parametrize('compression_format', ['gzip', 'xz', 'zstd'] ) def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> Any: _lowercase : Any = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path} _lowercase : List[Any] = input_paths[compression_format] _lowercase : Union[str, Any] = tmp_path / 'cache' _lowercase : Union[str, Any] = DownloadConfig(cache_dir=_lowercase, extract_compressed_file=_lowercase ) _lowercase : Optional[int] = cached_path(_lowercase, download_config=_lowercase ) with open(_lowercase ) as f: _lowercase : List[str] = f.read() with open(_lowercase ) as f: _lowercase : Optional[int] = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('default_extracted', [True, False] ) @pytest.mark.parametrize('default_cache_dir', [True, False] ) def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> Tuple: _lowercase : Optional[Any] = 'custom_cache' _lowercase : Tuple = 'custom_extracted_dir' _lowercase : List[str] = tmp_path / 'custom_extracted_path' if default_extracted: _lowercase : Union[str, Any] = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted') else: monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR', _lowercase ) monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(_lowercase ) ) _lowercase : List[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _lowercase : List[Any] = xz_file _lowercase : Any = ( DownloadConfig(extract_compressed_file=_lowercase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=_lowercase ) ) _lowercase : Tuple = cached_path(_lowercase, download_config=_lowercase ) assert Path(_lowercase ).parent.parts[-2:] == expected def __UpperCamelCase ( _lowercase ) -> Union[str, Any]: # absolute path _lowercase : Tuple = str(Path(_lowercase ).resolve() ) assert cached_path(_lowercase ) == text_file # relative path _lowercase : Dict = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_lowercase ) == text_file def __UpperCamelCase ( _lowercase ) -> Union[str, Any]: # absolute path _lowercase : List[Any] = str(tmp_path.resolve() / '__missing_file__.txt' ) with pytest.raises(_lowercase ): cached_path(_lowercase ) # relative path _lowercase : Tuple = './__missing_file__.txt' with pytest.raises(_lowercase ): cached_path(_lowercase ) def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : List[Any] = get_from_cache(f'''tmp://{tmpfs_file}''' ) with open(_lowercase ) as f: 
_lowercase : Optional[Any] = f.read() assert output_file_content == FILE_CONTENT @patch('datasets.config.HF_DATASETS_OFFLINE', _lowercase ) def __UpperCamelCase ( ) -> Dict: with pytest.raises(_lowercase ): cached_path('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE', _lowercase ) def __UpperCamelCase ( _lowercase ) -> List[str]: _lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(_lowercase ): http_get('https://huggingface.co', temp_file=_lowercase ) with pytest.raises(_lowercase ): http_head('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE', _lowercase ) def __UpperCamelCase ( _lowercase ) -> Optional[Any]: _lowercase : str = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(_lowercase ): ftp_get('ftp://huggingface.co', temp_file=_lowercase ) with pytest.raises(_lowercase ): ftp_head('ftp://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE', _lowercase ) def __UpperCamelCase ( _lowercase ) -> List[str]: _lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(_lowercase ): fsspec_get('s3://huggingface.co', temp_file=_lowercase ) with pytest.raises(_lowercase ): fsspec_head('s3://huggingface.co' )
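The helper at the center of these tests, in isolation: cached_path resolves a URL or local path into the cache and can transparently extract compressed files. Paths here are illustrative:

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

config = DownloadConfig(cache_dir="./hf_cache", extract_compressed_file=True)
local_path = cached_path("data/file.txt.gz", download_config=config)
with open(local_path) as f:
    print(f.read())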
4
'''simple docstring''' _A : Dict =''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' _A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}] _A : Dict ={ '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
4
1
'''simple docstring''' from __future__ import annotations import os from typing import Any import requests _A : int ='''https://api.github.com''' # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user _A : int =BASE_URL + '''/user''' # https://github.com/settings/tokens _A : List[Any] =os.environ.get('''USER_TOKEN''', '''''') def __UpperCamelCase ( _lowercase ) -> dict[Any, Any]: _lowercase : int = { 'Authorization': f'''token {auth_token}''', 'Accept': 'application/vnd.github.v3+json', } return requests.get(_lowercase, headers=_lowercase ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(F'''{key}: {value}''') else: raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
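An example call (the token value is illustrative); the helper returns the parsed JSON body of GET https://api.github.com/user as a dict:

info = fetch_github_info("ghp_your_token_here")  # personal access token from github.com/settings/tokens
print(info.get("login"), info.get("name"))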
4
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : int = torch.exp(_lowercase ) _lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i) _lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i) return torch.log(_lowercase ) - B / A class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]: '''simple docstring''' super().__init__() _lowercase : int = config.output_attentions _lowercase : int = config.output_hidden_states _lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] ) _lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] ) _lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )] def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int: '''simple docstring''' if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int): for i in range(len(self.early_exit_entropy ) ): _lowercase : Optional[Any] = x else: _lowercase : Optional[int] = x def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict: '''simple docstring''' _lowercase : Optional[int] = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]: '''simple docstring''' _lowercase : int = () _lowercase : List[Any] = () _lowercase : Tuple = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: _lowercase : Optional[int] = all_hidden_states + (hidden_states,) _lowercase : str = layer_module( UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : List[str] = layer_outputs[0] if self.output_attentions: _lowercase : Tuple = all_attentions + (layer_outputs[1],) _lowercase : Optional[int] = (hidden_states,) if self.output_hidden_states: _lowercase : str = current_outputs + (all_hidden_states,) if self.output_attentions: _lowercase : Optional[int] = current_outputs + (all_attentions,) _lowercase : List[Any] = self.highway[i](UpperCamelCase_ ) # logits, pooled_output if not self.training: _lowercase : Dict = highway_exit[0] _lowercase : Tuple = entropy(UpperCamelCase_ ) _lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy _lowercase : str = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: _lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(UpperCamelCase_ , i + 1 ) else: _lowercase : Optional[int] = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: _lowercase : str = all_hidden_states + (hidden_states,) _lowercase : Optional[Any] = (hidden_states,) if self.output_hidden_states: _lowercase : Dict = outputs + (all_hidden_states,) if 
self.output_attentions: _lowercase : Optional[Any] = outputs + (all_attentions,) _lowercase : Optional[int] = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( """The Bert Model transformer with early exiting (DeeBERT). """ , A , ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' super().__init__(UpperCamelCase_ ) _lowercase : int = config _lowercase : int = BertEmbeddings(UpperCamelCase_ ) _lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ ) _lowercase : Any = BertPooler(UpperCamelCase_ ) self.init_weights() def __UpperCAmelCase ( self : int ) -> Union[str, Any]: '''simple docstring''' self.encoder.init_highway_pooler(self.pooler ) def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' return self.embeddings.word_embeddings def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any: '''simple docstring''' _lowercase : Optional[Any] = value def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]: '''simple docstring''' for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]: '''simple docstring''' if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: _lowercase : Any = input_ids.size() elif inputs_embeds is not None: _lowercase : Any = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) _lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: _lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if encoder_attention_mask is None: _lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: _lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: _lowercase : int = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: _lowercase : int = encoder_attention_mask[:, None, None, :] _lowercase : str = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility _lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0 # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] _lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) _lowercase : Dict = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) _lowercase : List[Any] = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) _lowercase : int = encoder_outputs[0] _lowercase : str = self.pooler(UpperCamelCase_ ) _lowercase : List[Any] = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]: '''simple docstring''' _lowercase : Any = message _lowercase : Dict = exit_layer # start from 1! class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict: '''simple docstring''' super().__init__() _lowercase : Optional[Any] = BertPooler(UpperCamelCase_ ) _lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob ) _lowercase : int = nn.Linear(config.hidden_size , config.num_labels ) def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]: '''simple docstring''' _lowercase : str = encoder_outputs[0] _lowercase : int = self.pooler(UpperCamelCase_ ) # "return" pooler_output # BertModel _lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification _lowercase : Dict = bmodel_output[1] _lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ ) _lowercase : str = self.classifier(UpperCamelCase_ ) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. 
""" , A , ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]: '''simple docstring''' super().__init__(UpperCamelCase_ ) _lowercase : Dict = config.num_labels _lowercase : Any = config.num_hidden_layers _lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ ) _lowercase : Any = nn.Dropout(config.hidden_dropout_prob ) _lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple: '''simple docstring''' _lowercase : Union[str, Any] = self.num_layers try: _lowercase : Tuple = self.bert( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits _lowercase : List[Any] = outputs[1] _lowercase : int = self.dropout(UpperCamelCase_ ) _lowercase : Optional[int] = self.classifier(UpperCamelCase_ ) _lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowercase : Union[str, Any] = e.message _lowercase : Any = e.exit_layer _lowercase : Optional[int] = outputs[0] if not self.training: _lowercase : Union[str, Any] = entropy(UpperCamelCase_ ) _lowercase : Tuple = [] _lowercase : Tuple = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowercase : Tuple = MSELoss() _lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _lowercase : Union[str, Any] = CrossEntropyLoss() _lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _lowercase : Optional[Any] = [] for highway_exit in outputs[-1]: _lowercase : Optional[Any] = highway_exit[0] if not self.training: highway_logits_all.append(UpperCamelCase_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _lowercase : Union[str, Any] = MSELoss() _lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _lowercase : Dict = CrossEntropyLoss() _lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(UpperCamelCase_ ) if train_highway: _lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _lowercase : Optional[Any] = (loss,) + outputs if not self.training: _lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _lowercase : Dict = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
4
1
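The DeeBERT record above gates each highway exit on the entropy of its classifier's prediction. A minimal sketch of that confidence test, assuming a plain softmax-entropy criterion (the helper names `entropy` and `should_exit` are illustrative, not the example script's API):

import torch

def entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution, one value per example.
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

def should_exit(logits: torch.Tensor, threshold: float = 0.5) -> bool:
    # Exit at this highway layer once the classifier is confident (low entropy).
    return bool(entropy(logits).mean().item() < threshold)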
'''simple docstring''' from __future__ import annotations def __UpperCamelCase ( _lowercase ) -> int: if not nums: return 0 _lowercase : Tuple = nums[0] _lowercase : Optional[int] = 0 for num in nums[1:]: _lowercase , _lowercase : int = ( max_excluding + num, max(_lowercase, _lowercase ), ) return max(_lowercase, _lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
4
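The record above is the classic maximum-sum-of-non-adjacent-elements dynamic program with its variable names flattened by the dump. A readable sketch of the same recurrence (the names `max_including`/`max_excluding` are reconstructions):

def max_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        # Either extend a sum that skipped the previous element,
        # or carry forward the best sum seen so far.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)

assert max_non_adjacent_sum([1, 2, 4, 5]) == 7  # picks 2 and 5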
'''simple docstring''' import unittest from knapsack import greedy_knapsack as kp class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : int ) -> Any: '''simple docstring''' _lowercase : List[Any] = [10, 20, 30, 40, 50, 60] _lowercase : Tuple = [2, 4, 6, 8, 10, 12] _lowercase : Optional[Any] = 100 self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 ) def __UpperCAmelCase ( self : int ) -> int: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' ) def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'Weight can not be negative.' ) def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'Profit can not be negative.' ) def __UpperCAmelCase ( self : int ) -> List[str]: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' ) def __UpperCAmelCase ( self : int ) -> List[Any]: '''simple docstring''' self.assertRaisesRegex( UpperCamelCase_ , 'The length of profit and weight must be same.' ) if __name__ == "__main__": unittest.main()
4
1
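The unit tests above reference a `greedy_knapsack.calc_profit` that is not shown. A hedged sketch consistent with the asserted error messages (quoted verbatim, including their odd grammar, so the regex assertions match) and the expected profit of 210; the real module may differ in details, and weights are assumed strictly positive:

def calc_profit(profit: list[int], weight: list[int], max_weight: int) -> float:
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    # Greedy fractional knapsack: take items by decreasing profit/weight ratio.
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if w <= capacity:
            total, capacity = total + p, capacity - w
        else:
            total += p * capacity / w  # split the last item
            break
    return total

assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210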
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _A : Dict =logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): '''simple docstring''' A_ = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 255 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : bool = True , **UpperCamelCase_ : Union[str, Any] , ) -> None: '''simple docstring''' super().__init__(**UpperCamelCase_ ) _lowercase : Union[str, Any] = size if size is not None else {'shortest_edge': 224} _lowercase : str = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) _lowercase : List[str] = crop_size if crop_size is not None else {'height': 224, 'width': 224} _lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ , param_name='crop_size' ) _lowercase : List[str] = do_resize _lowercase : int = size _lowercase : int = resample _lowercase : Union[str, Any] = do_center_crop _lowercase : Optional[int] = crop_size _lowercase : Optional[int] = do_rescale _lowercase : List[str] = rescale_factor _lowercase : Optional[Any] = do_normalize _lowercase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _lowercase : Any = image_std if image_std is not None else OPENAI_CLIP_STD _lowercase : List[Any] = do_convert_rgb def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : int , ) -> np.ndarray: '''simple docstring''' _lowercase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) _lowercase : Optional[Any] = get_resize_output_image_size(UpperCamelCase_ , size=size['shortest_edge'] , default_to_square=UpperCamelCase_ ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[int] , ) -> np.ndarray: '''simple docstring''' _lowercase : List[Any] = get_size_dict(UpperCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase_ , size=(size['height'], size['width']) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : int , ) -> List[str]: '''simple docstring''' return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : str , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[int] , ) -> np.ndarray: '''simple docstring''' return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : int = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_ : Optional[int] , ) -> PIL.Image.Image: '''simple docstring''' _lowercase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize _lowercase : Tuple = size if size is not None else self.size _lowercase : Tuple = get_size_dict(UpperCamelCase_ , param_name='size' , default_to_square=UpperCamelCase_ ) _lowercase : Dict = resample if resample is not None else self.resample _lowercase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size _lowercase : Optional[int] = get_size_dict(UpperCamelCase_ , param_name='crop_size' , default_to_square=UpperCamelCase_ ) _lowercase : Any = do_rescale if do_rescale is not None else self.do_rescale _lowercase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : Any = do_normalize if do_normalize is not None else self.do_normalize _lowercase : Any = image_mean if image_mean is not None else self.image_mean _lowercase : Optional[Any] = image_std if image_std is not None else self.image_std _lowercase : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _lowercase : int = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' 
) # PIL RGBA images are converted to RGB if do_convert_rgb: _lowercase : Optional[Any] = [convert_to_rgb(UpperCamelCase_ ) for image in images] # All transformations expect numpy arrays. _lowercase : Optional[Any] = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: _lowercase : List[str] = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_center_crop: _lowercase : Optional[int] = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] if do_rescale: _lowercase : str = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: _lowercase : Dict = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] _lowercase : List[str] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] _lowercase : int = {'pixel_values': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
4
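Illustrative usage of a CLIP-style image processor like the class above; the checkpoint name is an assumption for demonstration, and the printed shape follows the 224-pixel defaults in the record:

from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")  # assumed checkpoint
image = Image.new("RGB", (640, 480), color="gray")
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])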
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Tuple =['''XLNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =['''XLNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =[ '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLNetForMultipleChoice''', '''XLNetForQuestionAnswering''', '''XLNetForQuestionAnsweringSimple''', '''XLNetForSequenceClassification''', '''XLNetForTokenClassification''', '''XLNetLMHeadModel''', '''XLNetModel''', '''XLNetPreTrainedModel''', '''load_tf_weights_in_xlnet''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =[ '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLNetForMultipleChoice''', '''TFXLNetForQuestionAnsweringSimple''', '''TFXLNetForSequenceClassification''', '''TFXLNetForTokenClassification''', '''TFXLNetLMHeadModel''', '''TFXLNetMainLayer''', '''TFXLNetModel''', '''TFXLNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
1
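The XLNet __init__ above defers heavy imports via `_LazyModule`. A minimal sketch of that pattern (simplified; the real transformers implementation also handles caching, `__dir__`, and error reporting):

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    # Submodules are imported only when one of their symbols is first accessed.
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr: str):
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(submodule, attr)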
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _A : List[str] =logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): '''simple docstring''' A_ = ["""pixel_values"""] def __init__( self : List[str] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 255 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : bool = True , **UpperCamelCase_ : Dict , ) -> None: '''simple docstring''' super().__init__(**UpperCamelCase_ ) _lowercase : Any = size if size is not None else {'shortest_edge': 224} _lowercase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) _lowercase : Union[str, Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224} _lowercase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ , param_name='crop_size' ) _lowercase : Optional[Any] = do_resize _lowercase : Dict = size _lowercase : Any = resample _lowercase : Tuple = do_center_crop _lowercase : Dict = crop_size _lowercase : Tuple = do_rescale _lowercase : Tuple = rescale_factor _lowercase : Tuple = do_normalize _lowercase : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _lowercase : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD _lowercase : Union[str, Any] = do_convert_rgb def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Dict , ) -> np.ndarray: '''simple docstring''' _lowercase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) _lowercase : Any = get_resize_output_image_size(UpperCamelCase_ , size=size['shortest_edge'] , default_to_square=UpperCamelCase_ ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Tuple , ) -> np.ndarray: '''simple docstring''' _lowercase : Any = get_size_dict(UpperCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase_ , size=(size['height'], size['width']) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : List[str] , ) -> int: '''simple docstring''' return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Tuple , ) -> np.ndarray: '''simple docstring''' return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : int = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_ : Optional[Any] , ) -> PIL.Image.Image: '''simple docstring''' _lowercase : Dict = do_resize if do_resize is not None else self.do_resize _lowercase : Tuple = size if size is not None else self.size _lowercase : List[str] = get_size_dict(UpperCamelCase_ , param_name='size' , default_to_square=UpperCamelCase_ ) _lowercase : Any = resample if resample is not None else self.resample _lowercase : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop _lowercase : Tuple = crop_size if crop_size is not None else self.crop_size _lowercase : Any = get_size_dict(UpperCamelCase_ , param_name='crop_size' , default_to_square=UpperCamelCase_ ) _lowercase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale _lowercase : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize _lowercase : List[str] = image_mean if image_mean is not None else self.image_mean _lowercase : Tuple = image_std if image_std is not None else self.image_std _lowercase : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _lowercase : int = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' 
) # PIL RGBA images are converted to RGB if do_convert_rgb: _lowercase : int = [convert_to_rgb(UpperCamelCase_ ) for image in images] # All transformations expect numpy arrays. _lowercase : Any = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: _lowercase : Optional[Any] = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_center_crop: _lowercase : List[Any] = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] if do_rescale: _lowercase : Optional[Any] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: _lowercase : List[str] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] _lowercase : int = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] _lowercase : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
4
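Both image-processor records resize with a "shortest_edge" rule. A simplified stand-in for that computation (not the library helper's exact signature): scale the image so its shorter side equals `shortest_edge` while preserving the aspect ratio.

def shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    short, long = min(height, width), max(height, width)
    new_long = int(round(long * shortest_edge / short))
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)

assert shortest_edge_size(480, 640, 224) == (224, 299)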
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Optional[Any] =logging.get_logger(__name__) _A : Optional[int] ={ '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """markuplm""" def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]: '''simple docstring''' super().__init__( pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : List[Any] = vocab_size _lowercase : Union[str, Any] = hidden_size _lowercase : Dict = num_hidden_layers _lowercase : Optional[Any] = num_attention_heads _lowercase : Dict = hidden_act _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : List[Any] = type_vocab_size _lowercase : Union[str, Any] = initializer_range _lowercase : Optional[int] = layer_norm_eps _lowercase : Optional[Any] = position_embedding_type _lowercase : str = use_cache _lowercase : str = classifier_dropout # additional properties _lowercase : int = max_depth _lowercase : Dict = max_xpath_tag_unit_embeddings _lowercase : str = max_xpath_subs_unit_embeddings _lowercase : List[str] = tag_pad_id _lowercase : Optional[int] = subs_pad_id _lowercase : Any = xpath_unit_hidden_size
4
1
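The MarkupLM config record adds XPath-specific sizes on top of the usual BERT-style fields. A short check against the library defaults (the printed values mirror the defaults in the record above):

from transformers import MarkupLMConfig

config = MarkupLMConfig()
print(config.max_depth, config.xpath_unit_hidden_size)  # 50 32
print(config.tag_pad_id, config.subs_pad_id)            # 216 1001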
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __UpperCamelCase ( _lowercase ) -> Union[str, Any]: _lowercase : int = 384 _lowercase : Union[str, Any] = 7 if "tiny" in model_name: _lowercase : Optional[Any] = 96 _lowercase : Dict = (2, 2, 6, 2) _lowercase : Dict = (3, 6, 12, 24) elif "small" in model_name: _lowercase : Union[str, Any] = 96 _lowercase : Dict = (2, 2, 18, 2) _lowercase : Dict = (3, 6, 12, 24) elif "base" in model_name: _lowercase : Any = 128 _lowercase : Optional[Any] = (2, 2, 18, 2) _lowercase : str = (4, 8, 16, 32) _lowercase : List[Any] = 12 _lowercase : Any = 512 elif "large" in model_name: _lowercase : List[str] = 192 _lowercase : List[Any] = (2, 2, 18, 2) _lowercase : Union[str, Any] = (6, 12, 24, 48) _lowercase : int = 12 _lowercase : int = 768 # set label information _lowercase : List[Any] = 150 _lowercase : List[str] = 'huggingface/label-files' _lowercase : str = 'ade20k-id2label.json' _lowercase : List[Any] = json.load(open(hf_hub_download(_lowercase, _lowercase, repo_type='dataset' ), 'r' ) ) _lowercase : Optional[Any] = {int(_lowercase ): v for k, v in idalabel.items()} _lowercase : str = {v: k for k, v in idalabel.items()} _lowercase : Optional[Any] = SwinConfig( embed_dim=_lowercase, depths=_lowercase, num_heads=_lowercase, window_size=_lowercase, out_features=['stage1', 'stage2', 'stage3', 'stage4'], ) _lowercase : Tuple = UperNetConfig( backbone_config=_lowercase, auxiliary_in_channels=_lowercase, num_labels=_lowercase, idalabel=_lowercase, labelaid=_lowercase, ) return config def __UpperCamelCase ( _lowercase ) -> int: _lowercase : Any = [] # fmt: off # stem rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) 
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Optional[Any]: _lowercase : Optional[int] = dct.pop(_lowercase ) _lowercase : List[str] = val def __UpperCamelCase ( _lowercase, _lowercase ) -> str: _lowercase : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowercase : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowercase : Optional[int] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) _lowercase : str = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _lowercase : Dict = in_proj_weight[:dim, :] _lowercase : List[str] = in_proj_bias[: dim] _lowercase : Dict = in_proj_weight[ dim : dim * 2, : ] _lowercase : List[Any] = in_proj_bias[ dim : dim * 2 ] _lowercase : Optional[int] = in_proj_weight[ -dim :, : ] _lowercase : int = in_proj_bias[-dim :] # fmt: on def __UpperCamelCase ( _lowercase ) -> List[str]: _lowercase , _lowercase : Dict = x.shape _lowercase : Dict = x.reshape(_lowercase, 4, in_channel // 4 ) _lowercase : Optional[Any] = x[:, [0, 2, 1, 3], :].transpose(1, 2 ).reshape(_lowercase, _lowercase ) return x def __UpperCamelCase ( _lowercase ) -> List[Any]: _lowercase , _lowercase : Optional[int] = x.shape _lowercase : Optional[Any] = x.reshape(_lowercase, in_channel // 4, 4 ) _lowercase : str = x[:, :, [0, 2, 1, 3]].transpose(1, 2 ).reshape(_lowercase, _lowercase ) return x def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : str = x.shape[0] _lowercase : List[Any] = x.reshape(4, in_channel // 4 ) _lowercase : Union[str, Any] 
= x[[0, 2, 1, 3], :].transpose(0, 1 ).reshape(_lowercase ) return x def __UpperCamelCase ( _lowercase ) -> List[Any]: _lowercase : Dict = x.shape[0] _lowercase : Any = x.reshape(in_channel // 4, 4 ) _lowercase : List[str] = x[:, [0, 2, 1, 3]].transpose(0, 1 ).reshape(_lowercase ) return x def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> str: _lowercase : Dict = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } _lowercase : Dict = model_name_to_url[model_name] _lowercase : Any = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu', file_name=_lowercase )[ 'state_dict' ] for name, param in state_dict.items(): print(_lowercase, param.shape ) _lowercase : str = get_upernet_config(_lowercase ) _lowercase : List[Any] = UperNetForSemanticSegmentation(_lowercase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): _lowercase : Tuple = state_dict.pop(_lowercase ) if "bn" in key: _lowercase : str = key.replace('bn', 'batch_norm' ) _lowercase : Union[str, Any] = val # rename keys _lowercase : str = create_rename_keys(_lowercase ) for src, dest in rename_keys: rename_key(_lowercase, _lowercase, _lowercase ) read_in_q_k_v(_lowercase, config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: _lowercase : Any = reverse_correct_unfold_reduction_order(_lowercase ) if "norm" in key: _lowercase : Tuple = reverse_correct_unfold_norm_order(_lowercase ) model.load_state_dict(_lowercase ) # verify on image _lowercase : List[str] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' _lowercase : List[Any] = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' ) _lowercase : str = SegformerImageProcessor() _lowercase : Optional[int] = processor(_lowercase, return_tensors='pt' ).pixel_values with torch.no_grad(): _lowercase : str = model(_lowercase ) _lowercase : Tuple = outputs.logits print(logits.shape ) print('First values of logits:', logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": _lowercase : Dict = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ) elif model_name == "upernet-swin-small": _lowercase : Union[str, Any] = torch.tensor( [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] ) elif model_name == "upernet-swin-base": _lowercase : int = 
torch.tensor( [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] ) elif model_name == "upernet-swin-large": _lowercase : Any = torch.tensor( [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] ) print('Logits:', outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3], _lowercase, atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowercase ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_lowercase ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": _A : Optional[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-swin-tiny''', type=str, choices=[F'''upernet-swin-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''']], help='''Name of the Swin + UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A : Optional[Any] =parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
4
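The conversion script above follows the usual checkpoint-porting pattern: rename keys, then split the fused qkv projection into separate query/key/value tensors. A self-contained sketch of that split (dimensions and key names are illustrative):

import torch

def split_qkv(state_dict: dict, prefix: str, dim: int) -> None:
    # The original checkpoint stores one (3*dim, dim) matrix for q, k and v.
    qkv = state_dict.pop(f"{prefix}.qkv.weight")
    state_dict[f"{prefix}.query.weight"] = qkv[:dim, :]
    state_dict[f"{prefix}.key.weight"] = qkv[dim : 2 * dim, :]
    state_dict[f"{prefix}.value.weight"] = qkv[-dim:, :]

sd = {"attn.qkv.weight": torch.randn(12, 4)}
split_qkv(sd, "attn", dim=4)
assert set(sd) == {"attn.query.weight", "attn.key.weight", "attn.value.weight"}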
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : Tuple = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: _lowercase : Tuple = 4 _lowercase : Union[str, Any] = 48 _lowercase : Any = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : Dict = [6, 6, 6, 6] _lowercase : Optional[int] = 60 _lowercase : List[str] = [6, 6, 6, 6] _lowercase : Dict = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : str = 4 _lowercase : str = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: _lowercase : str = 1 _lowercase : Tuple = 1 _lowercase : Dict = 126 _lowercase : Optional[int] = 7 _lowercase : List[Any] = 2_5_5.0 _lowercase : Tuple = '' return config def __UpperCamelCase ( _lowercase, _lowercase ) -> str: if "patch_embed.proj" in name and "layers" not in name: _lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: _lowercase : Tuple = name.replace('layers', 'encoder.stages' ) if "residual_group.blocks" in name: _lowercase : str = name.replace('residual_group.blocks', 'layers' ) if "attn.proj" in name: _lowercase : str = name.replace('attn.proj', 'attention.output.dense' ) if "attn" in name: _lowercase : List[Any] = name.replace('attn', 'attention.self' ) if "norm1" in name: _lowercase : List[str] = name.replace('norm1', 'layernorm_before' ) if "norm2" in name: _lowercase : Tuple = name.replace('norm2', 'layernorm_after' ) if "mlp.fc1" in name: _lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' ) if "mlp.fc2" in name: _lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' ) if "q_bias" in name: _lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' ) if "k_bias" in name: _lowercase : str = name.replace('k_bias', 'key.bias' ) if "v_bias" in name: _lowercase : int = name.replace('v_bias', 'value.bias' ) if "cpb_mlp" in name: _lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' ) if name == "norm.weight": _lowercase : Union[str, Any] = 'layernorm.weight' if name == "norm.bias": _lowercase : List[Any] = 'layernorm.bias' if "conv_first" in name: _lowercase : Tuple = name.replace('conv_first', 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: _lowercase : List[str] = name.replace('conv_last', 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: _lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' ) if "upsample.0" in name: _lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' ) if "upsample.2" in name: _lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' ) 
_lowercase : Optional[int] = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": _lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' ) _lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' ) else: pass else: _lowercase : Tuple = 'swin2sr.' + name return name def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]: for key in orig_state_dict.copy().keys(): _lowercase : int = orig_state_dict.pop(_lowercase ) if "qkv" in key: _lowercase : Tuple = key.split('.' ) _lowercase : Optional[Any] = int(key_split[1] ) _lowercase : Any = int(key_split[4] ) _lowercase : Optional[Any] = config.embed_dim if "weight" in key: _lowercase : Optional[int] = val[:dim, :] _lowercase : int = val[dim : dim * 2, :] _lowercase : int = val[-dim:, :] else: _lowercase : Optional[Any] = val[:dim] _lowercase : Tuple = val[dim : dim * 2] _lowercase : List[str] = val[-dim:] pass else: _lowercase : List[Any] = val return orig_state_dict def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]: _lowercase : Optional[Any] = get_config(_lowercase ) _lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase ) model.eval() _lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' ) _lowercase : Any = convert_state_dict(_lowercase, _lowercase ) _lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase ) if len(_lowercase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_lowercase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'''Unexpected key {key} in state_dict''' ) # verify values _lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' _lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' ) _lowercase : Tuple = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values _lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256 _lowercase : List[str] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) _lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 ) if config.num_channels == 1: _lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) _lowercase : Optional[int] = model(_lowercase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 512, 512] ) _lowercase : Tuple = torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] ) _lowercase : int = torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here _lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] ) _lowercase : Dict = torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : List[str] = torch.Size([1, 3, 512, 512] ) _lowercase : int = torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], 
[-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 1024, 1024] ) _lowercase : Union[str, Any] = torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 ) print('Looks ok!' ) _lowercase : List[str] = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } _lowercase : int = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowercase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_lowercase ) if push_to_hub: model.push_to_hub(f'''caidas/{model_name}''' ) processor.push_to_hub(f'''caidas/{model_name}''' ) if __name__ == "__main__": _A : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') _A : int =parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
1
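Both conversion records end with the same sanity check: compare a small slice of the converted model's logits against hard-coded reference values. A hedged sketch of that step (the helper name is illustrative):

import torch

def verify_logits(actual: torch.Tensor, expected: torch.Tensor, atol: float = 1e-3) -> None:
    # Raise if the converted model's outputs drift from the reference slice.
    if not torch.allclose(actual, expected, atol=atol):
        raise ValueError(f"conversion mismatch: {actual} vs {expected}")
    print("Looks ok!")

verify_logits(torch.tensor([-0.7087, -0.7138]), torch.tensor([-0.7087, -0.7138]))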
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase__ : '''simple docstring''' def __init__( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : int=2 , UpperCamelCase_ : List[Any]=8 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : int=True , UpperCamelCase_ : Any=99 , UpperCamelCase_ : Any=16 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : Optional[int]=36 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Tuple=0.0 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Any=16 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]: '''simple docstring''' _lowercase : Any = parent _lowercase : Tuple = batch_size _lowercase : Dict = seq_length _lowercase : str = is_training _lowercase : Tuple = use_input_mask _lowercase : int = use_token_type_ids _lowercase : str = use_labels _lowercase : Any = vocab_size _lowercase : Tuple = hidden_size _lowercase : Optional[Any] = num_hidden_layers _lowercase : Optional[Any] = num_attention_heads _lowercase : int = intermediate_size _lowercase : int = hidden_act _lowercase : Optional[int] = hidden_dropout_prob _lowercase : Any = attention_probs_dropout_prob _lowercase : int = max_position_embeddings _lowercase : Optional[Any] = type_vocab_size _lowercase : Dict = type_sequence_label_size _lowercase : Tuple = initializer_range _lowercase : str = num_labels _lowercase : str = num_choices _lowercase : Optional[int] = scope def __UpperCAmelCase ( self : Tuple ) -> Any: '''simple docstring''' _lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : int = None if self.use_input_mask: _lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Any = None if self.use_token_type_ids: _lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Tuple = None _lowercase : Tuple = None _lowercase : Any = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : List[str] = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self : Optional[int] ) -> str: '''simple docstring''' _lowercase : List[str] = self.get_config() _lowercase : Optional[int] = 300 return config def __UpperCAmelCase ( self : Any ) -> Any: '''simple docstring''' ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Any = self.prepare_config_and_inputs() _lowercase : Union[str, Any] = True _lowercase : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[str]: '''simple docstring''' _lowercase : Any = MraModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) _lowercase : Optional[int] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) _lowercase : Tuple = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , ) -> List[Any]: '''simple docstring''' _lowercase : Optional[Any] = True _lowercase : Union[str, Any] = MraModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : Optional[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) _lowercase : Optional[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , ) _lowercase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _lowercase : int = MraForMaskedLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : 
Optional[Any] , UpperCamelCase_ : Any ) -> Optional[int]:
        '''simple docstring'''
        _lowercase : Union[str, Any] = MraForQuestionAnswering(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        _lowercase : Union[str, Any] = model(
            UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any ) -> List[str]:
        '''simple docstring'''
        _lowercase : Union[str, Any] = self.num_labels
        _lowercase : Dict = MraForSequenceClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        _lowercase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCAmelCase ( self : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int ) -> Optional[int]:
        '''simple docstring'''
        _lowercase : Dict = self.num_labels
        _lowercase : int = MraForTokenClassification(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        _lowercase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCAmelCase ( self : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] ) -> Any:
        '''simple docstring'''
        _lowercase : Optional[Any] = self.num_choices
        _lowercase : int = MraForMultipleChoice(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        _lowercase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowercase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowercase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowercase : Tuple = model(
            UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
        '''simple docstring'''
        _lowercase : int = self.prepare_config_and_inputs()
        (
            (_lowercase) ,
            (_lowercase) ,
            (_lowercase) ,
            (_lowercase) ,
            (_lowercase) ,
            (_lowercase) ,
            (_lowercase) ,
        ) : str = config_and_inputs
        _lowercase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class lowerCamelCase__ ( A , unittest.TestCase ):
    '''simple docstring'''

    A_ = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    A_ = False
    A_ = False
    A_ = False
    A_ = False
    A_ = ()

    def __UpperCAmelCase ( self : int ) -> List[Any]:
        '''simple docstring'''
        _lowercase : Tuple = MraModelTester(self )
        _lowercase : Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )

    def __UpperCAmelCase ( self : Dict ) -> int:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
        '''simple docstring'''
        _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase_ )

    def __UpperCAmelCase ( self : List[Any] ) -> Dict:
        '''simple docstring'''
        _lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowercase : Optional[int] = type
            self.model_tester.create_and_check_model(*UpperCamelCase_ )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        '''simple docstring'''
        _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )

    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        '''simple docstring'''
        _lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ )

    def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        '''simple docstring'''
        _lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )

    def __UpperCAmelCase ( self : Any ) -> str:
        '''simple docstring'''
        _lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )

    def __UpperCAmelCase ( self : Any ) -> str:
        '''simple docstring'''
        _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )

    @slow
    def __UpperCAmelCase ( self : str ) -> Tuple:
        '''simple docstring'''
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase : List[Any] = MraModel.from_pretrained(UpperCamelCase_ )
            self.assertIsNotNone(UpperCamelCase_ )

    @unittest.skip(reason='MRA does not output attentions' )
    def __UpperCAmelCase ( self : Optional[Any] ) -> str:
        '''simple docstring'''
        return


@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        '''simple docstring'''
        _lowercase : Optional[Any] = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
        _lowercase : Optional[Any] = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            _lowercase : str = model(UpperCamelCase_ )[0]
        _lowercase : str = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , UpperCamelCase_ )
        _lowercase : List[Any] = torch.tensor(
            [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )

    @slow
    def __UpperCAmelCase ( self : Tuple ) -> str:
        '''simple docstring'''
        _lowercase : Optional[int] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
        _lowercase : Union[str, Any] = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            _lowercase : Union[str, Any] = model(UpperCamelCase_ )[0]
        _lowercase : int = 5_0265
        _lowercase : Optional[int] = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , UpperCamelCase_ )
        _lowercase : Dict = torch.tensor(
            [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )

    @slow
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        _lowercase : Union[str, Any] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
        _lowercase : Optional[int] = torch.arange(4096 ).unsqueeze(0 )
        with torch.no_grad():
            _lowercase : List[str] = model(UpperCamelCase_ )[0]
        _lowercase : Optional[Any] = 5_0265
        _lowercase : Dict = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , UpperCamelCase_ )
        _lowercase : int = torch.tensor(
            [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
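# Illustrative usage for text_justification above (a minimal sketch added for
# clarity; the expected output was checked by hand against the greedy fill plus
# round-robin space distribution, each output line being exactly max_width wide):
#
#   >>> text_justification("This is an example of text justification.", 16)
#   ['This    is    an', 'example  of text', 'justification.  ']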
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


_A : Union[str, Any] =logging.get_logger(__name__)


class lowerCamelCase__ ( A ):
    '''simple docstring'''

    def __init__( self : Tuple , *UpperCamelCase_ : str , **UpperCamelCase_ : Any ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.' , UpperCamelCase_ , )
        super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
'''simple docstring'''
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune hidden directories and the scripts folder in place so os.walk skips them
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
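# Illustrative usage for print_directory_md above (a minimal sketch added for
# clarity). Run from a repository root, it prints a nested Markdown index; a
# hypothetical file `maths/prime_check.py` would come out as:
#
#   ## Maths
#     * [Prime Check](maths/prime_check.py)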
'''simple docstring'''
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_A : Tuple =logging.get_logger(__name__)

_A : Dict ={
    '''microsoft/unispeech-sat-base-100h-libri-ft''': (
        '''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class lowerCamelCase__ ( A ):
    '''simple docstring'''

    A_ = """unispeech-sat"""

    def __init__( self : List[Any] , UpperCamelCase_ : Any=32 , UpperCamelCase_ : List[Any]=768 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Dict=12 , UpperCamelCase_ : Optional[int]=3072 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : str=1E-5 , UpperCamelCase_ : Dict="group" , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Dict=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase_ : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase_ : Tuple=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : List[Any]=128 , UpperCamelCase_ : Any=16 , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=0.05 , UpperCamelCase_ : List[Any]=10 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : Optional[int]=10 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : List[Any]=320 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : List[Any]=100 , UpperCamelCase_ : int=256 , UpperCamelCase_ : Tuple=256 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Tuple="mean" , UpperCamelCase_ : int=False , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : List[Any]=256 , UpperCamelCase_ : List[Any]=(512, 512, 512, 512, 1500) , UpperCamelCase_ : Optional[int]=(5, 3, 3, 1, 1) , UpperCamelCase_ : Optional[Any]=(1, 2, 3, 1, 1) , UpperCamelCase_ : List[Any]=512 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Dict=1 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Any=504 , **UpperCamelCase_ : int , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
        _lowercase : int = hidden_size
        _lowercase : Union[str, Any] = feat_extract_norm
        _lowercase : Union[str, Any] = feat_extract_activation
        _lowercase : Optional[Any] = list(UpperCamelCase_ )
        _lowercase : Optional[int] = list(UpperCamelCase_ )
        _lowercase : Dict = list(UpperCamelCase_ )
        _lowercase : Tuple = conv_bias
        _lowercase : Dict = num_conv_pos_embeddings
        _lowercase : List[Any] = num_conv_pos_embedding_groups
        _lowercase : List[Any] = len(self.conv_dim )
        _lowercase : List[str] = num_hidden_layers
        _lowercase : List[Any] = intermediate_size
        _lowercase : List[Any] = hidden_act
        _lowercase : int = num_attention_heads
        _lowercase : Dict = hidden_dropout
        _lowercase : Optional[Any] = attention_dropout
        _lowercase : List[str] = activation_dropout
        _lowercase : List[Any] = feat_proj_dropout
        _lowercase : Optional[int] = final_dropout
        _lowercase : List[str] = layerdrop
        _lowercase : Optional[Any] = layer_norm_eps
        _lowercase : Optional[int] = initializer_range
        _lowercase : Tuple = vocab_size
        _lowercase : List[str] = num_clusters
        _lowercase : Tuple = do_stable_layer_norm
        _lowercase : Any = use_weighted_layer_sum

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _lowercase : Tuple = apply_spec_augment
        _lowercase : List[str] = mask_time_prob
        _lowercase : List[str] = mask_time_length
        _lowercase : Dict = mask_time_min_masks
        _lowercase : List[str] = mask_feature_prob
        _lowercase : Optional[int] = mask_feature_length
        _lowercase : Tuple = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        _lowercase : List[Any] = num_codevectors_per_group
        _lowercase : Optional[int] = num_codevector_groups
        _lowercase : str = contrastive_logits_temperature
        _lowercase : Union[str, Any] = feat_quantizer_dropout
        _lowercase : Tuple = num_negatives
        _lowercase : Dict = codevector_dim
        _lowercase : Tuple = proj_codevector_dim
        _lowercase : Optional[int] = diversity_loss_weight

        # ctc loss
        _lowercase : List[Any] = ctc_loss_reduction
        _lowercase : Dict = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        _lowercase : Any = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        _lowercase : Union[str, Any] = list(UpperCamelCase_ )
        _lowercase : str = list(UpperCamelCase_ )
        _lowercase : Tuple = list(UpperCamelCase_ )
        _lowercase : Optional[Any] = xvector_output_dim

    @property
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Dict =['''ReformerTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Union[str, Any] =['''ReformerTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : str =[
        '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ReformerAttention''',
        '''ReformerForMaskedLM''',
        '''ReformerForQuestionAnswering''',
        '''ReformerForSequenceClassification''',
        '''ReformerLayer''',
        '''ReformerModel''',
        '''ReformerModelWithLMHead''',
        '''ReformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    _A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
_A : str =logging.getLogger(__name__)


def __UpperCamelCase ( _lowercase, _lowercase ) -> int:
    _lowercase : Any = np.argmax(_lowercase, axis=1 )
    return np.sum(outputs == labels )


def __UpperCamelCase ( _lowercase ) -> Union[str, Any]:
    with open(_lowercase, encoding='utf_8' ) as f:
        _lowercase : List[str] = csv.reader(_lowercase )
        _lowercase : Tuple = []
        next(_lowercase )  # skip the first line
        for line in tqdm(_lowercase ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output


def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> Dict:
    _lowercase : Any = []
    for dataset in encoded_datasets:
        _lowercase : int = len(_lowercase )
        _lowercase : List[str] = np.zeros((n_batch, 2, input_len), dtype=np.intaa )
        _lowercase : Optional[int] = np.zeros((n_batch, 2), dtype=np.intaa )
        _lowercase : str = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.intaa )
        _lowercase : Any = np.zeros((n_batch,), dtype=np.intaa )
        for (
            i,
            (story, conta, conta, mc_label),
        ) in enumerate(_lowercase ):
            _lowercase : Optional[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            _lowercase : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            _lowercase : Optional[int] = with_conta
            _lowercase : List[Any] = with_conta
            _lowercase : Optional[int] = len(_lowercase ) - 1
            _lowercase : Optional[Any] = len(_lowercase ) - 1
            _lowercase : List[Any] = with_conta
            _lowercase : Tuple = with_conta
            _lowercase : Any = mc_label
        _lowercase : List[Any] = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(_lowercase ) for t in all_inputs ) )
    return tensor_datasets


def __UpperCamelCase ( ) -> List[Any]:
    _lowercase : Tuple = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=_lowercase, default='openai-gpt', help='pretrained model name' )
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.' )
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.' )
    parser.add_argument(
        '--output_dir',
        default=_lowercase,
        type=_lowercase,
        required=_lowercase,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    parser.add_argument('--train_dataset', type=_lowercase, default='' )
    parser.add_argument('--eval_dataset', type=_lowercase, default='' )
    parser.add_argument('--seed', type=_lowercase, default=42 )
    parser.add_argument('--num_train_epochs', type=_lowercase, default=3 )
    parser.add_argument('--train_batch_size', type=_lowercase, default=8 )
    parser.add_argument('--eval_batch_size', type=_lowercase, default=16 )
    parser.add_argument('--adam_epsilon', default=1E-8, type=_lowercase, help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm', type=_lowercase, default=1 )
    parser.add_argument(
        '--max_steps',
        default=-1,
        type=_lowercase,
        help='If > 0: set total number of training steps to perform. Override num_train_epochs.',
    )
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=_lowercase,
        default=1,
        help='Number of updates steps to accumulate before performing a backward/update pass.',
    )
    parser.add_argument('--learning_rate', type=_lowercase, default=6.2_5E-5 )
    parser.add_argument('--warmup_steps', default=0, type=_lowercase, help='Linear warmup over warmup_steps.' )
    parser.add_argument('--lr_schedule', type=_lowercase, default='warmup_linear' )
    parser.add_argument('--weight_decay', type=_lowercase, default=0.0_1 )
    parser.add_argument('--lm_coef', type=_lowercase, default=0.9 )
    parser.add_argument('--n_valid', type=_lowercase, default=374 )
    parser.add_argument('--server_ip', type=_lowercase, default='', help='Can be used for distant debugging.' )
    parser.add_argument('--server_port', type=_lowercase, default='', help='Can be used for distant debugging.' )
    _lowercase : Dict = parser.parse_args()
    print(_lowercase )

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=_lowercase )
        ptvsd.wait_for_attach()

    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )

    _lowercase : int = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    _lowercase : Union[str, Any] = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(_lowercase, _lowercase ) )

    if not args.do_train and not args.do_eval:
        raise ValueError('At least one of `do_train` or `do_eval` must be True.' )

    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    _lowercase : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
    _lowercase : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(_lowercase )
    _lowercase : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
    _lowercase : Tuple = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(_lowercase ) )
    model.to(_lowercase )

    # Load and encode the datasets
    def tokenize_and_encode(_lowercase ):
        if isinstance(_lowercase, _lowercase ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_lowercase ) )
        elif isinstance(_lowercase, _lowercase ):
            return obj
        return [tokenize_and_encode(_lowercase ) for o in obj]

    logger.info('Encoding dataset...' )
    _lowercase : Any = load_rocstories_dataset(args.train_dataset )
    _lowercase : List[str] = load_rocstories_dataset(args.eval_dataset )
    _lowercase : Dict = (train_dataset, eval_dataset)
    _lowercase : Optional[int] = tokenize_and_encode(_lowercase )

    # Compute the max input length for the Transformer
    _lowercase : Optional[Any] = model.config.n_positions // 2 - 2
    _lowercase : List[Any] = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ), len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset
    )
    _lowercase : List[str] = min(_lowercase, model.config.n_positions )  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    _lowercase : Optional[int] = pre_process_datasets(_lowercase, _lowercase, _lowercase, *_lowercase )
    _lowercase , _lowercase : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]

    _lowercase : Any = TensorDataset(*_lowercase )
    _lowercase : Optional[Any] = RandomSampler(_lowercase )
    _lowercase : Union[str, Any] = DataLoader(_lowercase, sampler=_lowercase, batch_size=args.train_batch_size )

    _lowercase : Optional[int] = TensorDataset(*_lowercase )
    _lowercase : List[Any] = SequentialSampler(_lowercase )
    _lowercase : Optional[Any] = DataLoader(_lowercase, sampler=_lowercase, batch_size=args.eval_batch_size )

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            _lowercase : Tuple = args.max_steps
            _lowercase : List[str] = args.max_steps // (len(_lowercase ) // args.gradient_accumulation_steps) + 1
        else:
            _lowercase : Dict = len(_lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs

        _lowercase : Optional[int] = list(model.named_parameters() )
        _lowercase : Any = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        _lowercase : Tuple = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
        ]
        _lowercase : Tuple = AdamW(_lowercase, lr=args.learning_rate, eps=args.adam_epsilon )
        _lowercase : Optional[int] = get_linear_schedule_with_warmup(
            _lowercase, num_warmup_steps=args.warmup_steps, num_training_steps=_lowercase
        )

    if args.do_train:
        _lowercase , _lowercase , _lowercase : int = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ), desc='Epoch' ):
            _lowercase : Optional[Any] = 0
            _lowercase : Union[str, Any] = 0
            _lowercase : Dict = tqdm(_lowercase, desc='Training' )
            for step, batch in enumerate(_lowercase ):
                _lowercase : Dict = tuple(t.to(_lowercase ) for t in batch )
                _lowercase , _lowercase , _lowercase , _lowercase : Dict = batch
                _lowercase : Optional[Any] = model(_lowercase, mc_token_ids=_lowercase, lm_labels=_lowercase, mc_labels=_lowercase )
                _lowercase : Any = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                _lowercase : str = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                _lowercase : Dict = 'Training loss: {:.2e} lr: {:.2e}'.format(_lowercase, scheduler.get_lr()[0] )

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        _lowercase : List[str] = model.module if hasattr(_lowercase, 'module' ) else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        _lowercase : Optional[int] = os.path.join(args.output_dir, _lowercase )
        _lowercase : List[Any] = os.path.join(args.output_dir, _lowercase )

        torch.save(model_to_save.state_dict(), _lowercase )
        model_to_save.config.to_json_file(_lowercase )
        tokenizer.save_vocabulary(args.output_dir )

        # Load a trained model and vocabulary that you have fine-tuned
        _lowercase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        _lowercase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(_lowercase )

    if args.do_eval:
        model.eval()
        _lowercase , _lowercase : List[Any] = 0, 0
        _lowercase , _lowercase : List[str] = 0, 0
        for batch in tqdm(_lowercase, desc='Evaluating' ):
            _lowercase : str = tuple(t.to(_lowercase ) for t in batch )
            _lowercase , _lowercase , _lowercase , _lowercase : Any = batch
            with torch.no_grad():
                _lowercase , _lowercase , _lowercase , _lowercase : Tuple = model(
                    _lowercase, mc_token_ids=_lowercase, lm_labels=_lowercase, mc_labels=_lowercase
                )
            _lowercase : List[str] = mc_logits.detach().cpu().numpy()
            _lowercase : Any = mc_labels.to('cpu' ).numpy()
            _lowercase : int = accuracy(_lowercase, _lowercase )

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1

        _lowercase : Tuple = eval_loss / nb_eval_steps
        _lowercase : Optional[int] = eval_accuracy / nb_eval_examples
        _lowercase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
        _lowercase : List[Any] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}

        _lowercase : Optional[Any] = os.path.join(args.output_dir, 'eval_results.txt' )
        with open(_lowercase, 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key in sorted(result.keys() ):
                logger.info('  %s = %s', _lowercase, str(result[key] ) )
                writer.write('%s = %s\n' % (key, str(result[key] )) )


if __name__ == "__main__":
    main()
'''simple docstring'''
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class lowerCamelCase__ ( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
        '''simple docstring'''
        _lowercase : int = parent
        _lowercase : str = batch_size
        _lowercase : List[str] = seq_length
        _lowercase : Dict = is_training
        _lowercase : Optional[int] = use_attention_mask
        _lowercase : List[Any] = use_token_type_ids
        _lowercase : Union[str, Any] = use_labels
        _lowercase : Dict = vocab_size
        _lowercase : List[Any] = hidden_size
        _lowercase : Any = num_hidden_layers
        _lowercase : int = num_attention_heads
        _lowercase : Optional[int] = intermediate_size
        _lowercase : Any = hidden_act
        _lowercase : List[str] = hidden_dropout_prob
        _lowercase : Union[str, Any] = attention_probs_dropout_prob
        _lowercase : Optional[int] = max_position_embeddings
        _lowercase : int = type_vocab_size
        _lowercase : Any = type_sequence_label_size
        _lowercase : Any = initializer_range
        _lowercase : str = num_choices

    def __UpperCAmelCase ( self : str ) -> int:
        '''simple docstring'''
        _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        _lowercase : int = None
        if self.use_attention_mask:
            _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )

        _lowercase : Any = None
        if self.use_token_type_ids:
            _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        _lowercase : str = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )

        return config, input_ids, token_type_ids, attention_mask

    def __UpperCAmelCase ( self : List[Any] ) -> int:
        '''simple docstring'''
        _lowercase : Dict = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
        _lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict


@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
    '''simple docstring'''

    A_ = True

    A_ = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def __UpperCAmelCase ( self : str ) -> int:
        '''simple docstring'''
        _lowercase : Tuple = FlaxRoFormerModelTester(self )

    @slow
    def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            _lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
            _lowercase : str = model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase_ )


@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
        '''simple docstring'''
        _lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        _lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
        _lowercase : int = model(UpperCamelCase_ )[0]
        _lowercase : Union[str, Any] = 5_0000
        _lowercase : str = (1, 6, vocab_size)
        self.assertEqual(output.shape , UpperCamelCase_ )
        _lowercase : int = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
'''simple docstring'''
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    '''simple docstring'''

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            # pick the cheapest edge crossing the cut between subgraph and the rest
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f'''{solution() = }''')
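# Illustrative usage for the Graph API above (a minimal sketch added for
# clarity; the toy triangle below is not from Project Euler's data file).
# Prim's algorithm keeps the two cheapest edges, so the saving equals the
# weight of the dropped (0, 2) edge:
#
#   g = Graph({0, 1, 2}, {(0, 1): 10, (1, 2): 20, (0, 2): 25})
#   mst = g.prims_algorithm()
#   sum(g.edges.values()) - sum(mst.edges.values())  # -> 25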
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


_A : Optional[int] =logging.get_logger(__name__)


class lowerCamelCase__ ( A ):
    '''simple docstring'''

    A_ = ["""input_features""", """is_longer"""]

    def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
        '''simple docstring'''
        super().__init__(
            feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
        _lowercase : Tuple = top_db
        _lowercase : Any = truncation
        _lowercase : str = padding
        _lowercase : int = fft_window_size
        _lowercase : Any = (fft_window_size >> 1) + 1
        _lowercase : int = hop_length
        _lowercase : Any = max_length_s
        _lowercase : str = max_length_s * sampling_rate
        _lowercase : Any = sampling_rate
        _lowercase : List[Any] = frequency_min
        _lowercase : Tuple = frequency_max
        _lowercase : Tuple = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
        _lowercase : Any = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )

    def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
        '''simple docstring'''
        _lowercase : Tuple = copy.deepcopy(self.__dict__ )
        _lowercase : int = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
        '''simple docstring'''
        _lowercase : List[str] = spectrogram(
            UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
        return log_mel_spectrogram.T

    def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        _lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            _lowercase : int = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            _lowercase : Union[str, Any] = [0]
        # randomly choose index for each part
        _lowercase : Tuple = np.random.choice(ranges[0] )
        _lowercase : int = np.random.choice(ranges[1] )
        _lowercase : Any = np.random.choice(ranges[2] )

        _lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
        _lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
        _lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]

        _lowercase : List[Any] = torch.tensor(mel[None, None, :] )
        _lowercase : Optional[int] = torch.nn.functional.interpolate(
            UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
        _lowercase : str = mel_shrink[0][0].numpy()
        _lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion

    def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                _lowercase : Tuple = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                _lowercase : Any = len(UpperCamelCase_ ) - max_length
                _lowercase : Dict = np.random.randint(0 , overflow + 1 )
                _lowercase : Optional[int] = waveform[idx : idx + max_length]
                _lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
                _lowercase : List[Any] = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                _lowercase : Optional[int] = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    _lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
                    _lowercase : List[Any] = False
                else:
                    _lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
                    _lowercase : int = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            _lowercase : Any = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    _lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
                    _lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    _lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
                    _lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
                _lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )

            if truncation == "fusion":
                _lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
                _lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]

        return input_mel, longer

    def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
        '''simple docstring'''
        _lowercase : Dict = truncation if truncation is not None else self.truncation
        _lowercase : int = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        _lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        _lowercase : List[str] = is_batched_numpy or (
            isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )

        if is_batched:
            _lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
            _lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
        elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            _lowercase : Tuple = raw_speech.astype(np.floataa )

        # always return batch
        if not is_batched:
            _lowercase : int = [np.asarray(UpperCamelCase_ )]

        # convert to mel spectrogram, truncate and pad if needed.
        _lowercase : Optional[Any] = [
            self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
            for waveform in raw_speech
        ]

        _lowercase : List[Any] = []
        _lowercase : Dict = []
        for mel, longer in padded_inputs:
            input_mel.append(UpperCamelCase_ )
            is_longer.append(UpperCamelCase_ )

        if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            _lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
            _lowercase : str = True

        if isinstance(input_mel[0] , UpperCamelCase_ ):
            _lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]

        # is_longer is a list of bool
        _lowercase : Tuple = [[longer] for longer in is_longer]

        _lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
        _lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )

        if return_tensors is not None:
            _lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )

        return input_features
'''simple docstring'''
from collections import defaultdict


class AssignmentUsingBitmask:
    '''simple docstring'''

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if all persons have been given a task, this arrangement counts
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(task_performed))
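# Worked example for AssignmentUsingBitmask above (added for clarity). With
# total=5 tasks and task_performed=[[1, 3, 4], [1, 2, 5], [3, 4]], person 0 may
# take a task from {1, 3, 4}, person 1 from {1, 2, 5}, and person 2 from {3, 4};
# counting assignments of pairwise-distinct tasks by hand gives 5 + 5 = 10,
# which is what the __main__ block prints. The mask argument tracks which of
# the M = 3 persons already hold a task, so dp has 2**3 rows and total+1 columns.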
'''simple docstring'''
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
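# Illustrative output for hackernews_top_stories_as_markdown above (a minimal
# sketch added for clarity; the titles/URLs are made up and a live network
# connection is required):
#
#   >>> print(hackernews_top_stories_as_markdown(2))
#   * [Example story one](https://example.com/one)
#   * [Example story two](https://example.com/two)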
'''simple docstring'''
import os
from pathlib import Path

import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader

from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset


_A : Optional[Any] ='''bert-base-cased'''
_A : List[Any] ='''google/pegasus-xsum'''
_A : Optional[int] =[''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
_A : Any =['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
_A : Optional[int] ='''patrickvonplaten/t5-tiny-random'''
_A : int ='''sshleifer/bart-tiny-random'''
_A : str ='''sshleifer/tiny-mbart'''
_A : List[Any] ='''sshleifer/tiny-marian-en-de'''


def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
    _lowercase : str = '\n'.join(_lowercase )
    Path(_lowercase ).open('w' ).writelines(_lowercase )


def __UpperCamelCase ( _lowercase ) -> List[Any]:
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(_lowercase, f'''{split}.source''' ), _lowercase )
        _dump_articles(os.path.join(_lowercase, f'''{split}.target''' ), _lowercase )
    return tmp_dir


class lowerCamelCase__ ( A ):
    '''simple docstring'''

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    @slow
    def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Union[str, Any] ) -> List[str]:
        '''simple docstring'''
        _lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
        _lowercase : Tuple = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _lowercase : List[Any] = max(len(tokenizer.encode(UpperCamelCase_ ) ) for a in ARTICLES )
        _lowercase : Dict = max(len(tokenizer.encode(UpperCamelCase_ ) ) for a in SUMMARIES )
        _lowercase : int = 4
        _lowercase : Union[str, Any] = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        _lowercase , _lowercase : str = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
        _lowercase : Dict = SeqaSeqDataset(
            UpperCamelCase_ , data_dir=UpperCamelCase_ , type_path='train' , max_source_length=UpperCamelCase_ , max_target_length=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , )
        _lowercase : List[str] = DataLoader(UpperCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            _lowercase : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def __UpperCAmelCase ( self : int , UpperCamelCase_ : Union[str, Any] ) -> Union[str, Any]:
        '''simple docstring'''
        _lowercase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
        _lowercase : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        _lowercase : str = max(len(tokenizer.encode(UpperCamelCase_ ) ) for a in ARTICLES )
        _lowercase : List[Any] = max(len(tokenizer.encode(UpperCamelCase_ ) ) for a in SUMMARIES )
        _lowercase : List[Any] = 4
        _lowercase : Optional[int] = LegacySeqaSeqDataset(
            UpperCamelCase_ , data_dir=UpperCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=UpperCamelCase_ , )
        _lowercase : Any = DataLoader(UpperCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        '''simple docstring'''
        _lowercase : Tuple = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
        _lowercase : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        _lowercase : Dict = tmp_dir.joinpath('train.source' ).open().readlines()
        _lowercase : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(UpperCamelCase_ , UpperCamelCase_ , 128 , UpperCamelCase_ )
        _lowercase : Dict = {x.name for x in tmp_dir.iterdir()}
        _lowercase : Optional[int] = {x.name for x in save_dir.iterdir()}
        _lowercase : List[Any] = save_dir.joinpath('train.source' ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(UpperCamelCase_ ) < len(UpperCamelCase_ )
        assert len(UpperCamelCase_ ) == 1
        assert len(packed_examples[0] ) == sum(len(UpperCamelCase_ ) for x in orig_examples )
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def __UpperCAmelCase ( self : Any ) -> Any:
        '''simple docstring'''
        if not FAIRSEQ_AVAILABLE:
            return
        _lowercase , _lowercase , _lowercase : int = self._get_dataset(max_len=64 )
        _lowercase : List[Any] = 64
        _lowercase : List[Any] = ds.make_dynamic_sampler(UpperCamelCase_ , required_batch_size_multiple=UpperCamelCase_ )
        _lowercase : Union[str, Any] = [len(UpperCamelCase_ ) for x in batch_sampler]
        assert len(set(UpperCamelCase_ ) ) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(UpperCamelCase_ ) == len(UpperCamelCase_ )  # no dropped or added examples
        _lowercase : List[Any] = DataLoader(UpperCamelCase_ , batch_sampler=UpperCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _lowercase : Optional[int] = []
        _lowercase : Optional[Any] = []
        for batch in data_loader:
            _lowercase : str = batch['input_ids'].shape
            _lowercase : str = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            _lowercase : Optional[Any] = np.product(batch['input_ids'].shape )
            num_src_per_batch.append(UpperCamelCase_ )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(UpperCamelCase_ )
        assert num_src_per_batch[0] == max(UpperCamelCase_ )
        if failures:
            raise AssertionError(F'''too many tokens in {len(UpperCamelCase_ )} batches''' )

    def __UpperCAmelCase ( self : Dict ) -> int:
        '''simple docstring'''
        _lowercase , _lowercase , _lowercase : str = self._get_dataset(max_len=512 )
        _lowercase : Any = 2
        _lowercase : List[Any] = ds.make_sortish_sampler(UpperCamelCase_ , shuffle=UpperCamelCase_ )
        _lowercase : Union[str, Any] = DataLoader(UpperCamelCase_ , batch_size=UpperCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
        _lowercase : int = DataLoader(UpperCamelCase_ , batch_size=UpperCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=UpperCamelCase_ )
        _lowercase : List[Any] = tokenizer.pad_token_id

        def count_pad_tokens(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]="input_ids" ):
            return [batch[k].eq(UpperCamelCase_ ).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(UpperCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(UpperCamelCase_ , k='labels' ) )
        assert sum(count_pad_tokens(UpperCamelCase_ ) ) < sum(count_pad_tokens(UpperCamelCase_ ) )
        assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )

    def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Optional[Any]=1000 , UpperCamelCase_ : str=128 ) -> Union[str, Any]:
        '''simple docstring'''
        if os.getenv('USE_REAL_DATA' , UpperCamelCase_ ):
            _lowercase : List[str] = 'examples/seq2seq/wmt_en_ro'
            _lowercase : Any = max_len * 2 * 64
            if not Path(UpperCamelCase_ ).joinpath('train.len' ).exists():
                save_len_file(UpperCamelCase_ , UpperCamelCase_ )
        else:
            _lowercase : Optional[int] = 'examples/seq2seq/test_data/wmt_en_ro'
            _lowercase : List[str] = max_len * 4
            save_len_file(UpperCamelCase_ , UpperCamelCase_ )

        _lowercase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase_ )
        _lowercase : List[str] = SeqaSeqDataset(
            UpperCamelCase_ , data_dir=UpperCamelCase_ , type_path='train' , max_source_length=UpperCamelCase_ , max_target_length=UpperCamelCase_ , n_obs=UpperCamelCase_ , )
        return ds, max_tokens, tokenizer

    def __UpperCAmelCase ( self : int ) -> int:
        '''simple docstring'''
        _lowercase , _lowercase , _lowercase : List[str] = self._get_dataset()
        _lowercase : List[str] = set(DistributedSortishSampler(UpperCamelCase_ , 256 , num_replicas=2 , rank=0 , add_extra_examples=UpperCamelCase_ ) )
        _lowercase : Any = set(DistributedSortishSampler(UpperCamelCase_ , 256 , num_replicas=2 , rank=1 , add_extra_examples=UpperCamelCase_ ) )
        assert idsa.intersection(UpperCamelCase_ ) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    def __UpperCAmelCase ( self : Any , UpperCamelCase_ : List[str] ) -> Dict:
        '''simple docstring'''
        _lowercase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
        if tok_name == MBART_TINY:
            _lowercase : Any = SeqaSeqDataset(
                UpperCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
            _lowercase : int = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            _lowercase : str = SeqaSeqDataset(
                UpperCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
            _lowercase : Optional[Any] = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(UpperCamelCase_ ) == 1 if tok_name == BART_TINY else len(UpperCamelCase_ ) == 0
4
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Dict =logging.get_logger(__name__) _A : Dict ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """megatron-bert""" def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Dict = vocab_size _lowercase : Any = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Dict = num_attention_heads _lowercase : Dict = hidden_act _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : Optional[Any] = initializer_range _lowercase : List[str] = layer_norm_eps _lowercase : List[Any] = position_embedding_type _lowercase : Optional[Any] = use_cache
4
1
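The mBART checks in the sample above depend on shifting the labels right so that the target-language code becomes the first decoder input token. Below is a minimal sketch of that shift, assuming the legacy mBART convention; the helper's behavior is inferred from the assertions, not taken verbatim from the library.

import torch

def shift_tokens_right(labels: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    # The last non-pad token of each row is the language code; move it to
    # position 0 and shift the remaining tokens one step to the right.
    prev_output_tokens = labels.clone()
    index_of_eos = (labels.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    prev_output_tokens[:, 0] = labels.gather(1, index_of_eos).squeeze(1)
    prev_output_tokens[:, 1:] = labels[:, :-1]
    return prev_output_tokens

This reproduces what the decoder_input_ids assertions verify: the language code at index 0 and the eos token at the end of the shifted sequence.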
'''simple docstring''' from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING _A : List[Any] =logging.get_logger(__name__) @add_end_docstrings(A ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Any , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Tuple ) -> Optional[int]: '''simple docstring''' super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) requires_backends(self , 'decord' ) self.check_model_type(UpperCamelCase_ ) def __UpperCAmelCase ( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : List[Any]=None ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[int] = {} if frame_sampling_rate is not None: _lowercase : str = frame_sampling_rate if num_frames is not None: _lowercase : int = num_frames _lowercase : str = {} if top_k is not None: _lowercase : Any = top_k return preprocess_params, {}, postprocess_params def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[str, List[str]] , **UpperCamelCase_ : str ) -> int: '''simple docstring''' return super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=1 ) -> Dict: '''simple docstring''' if num_frames is None: _lowercase : str = self.model.config.num_frames if video.startswith('http://' ) or video.startswith('https://' ): _lowercase : Tuple = BytesIO(requests.get(UpperCamelCase_ ).content ) _lowercase : str = VideoReader(UpperCamelCase_ ) videoreader.seek(0 ) _lowercase : Any = 0 _lowercase : Dict = num_frames * frame_sampling_rate - 1 _lowercase : Optional[int] = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa ) _lowercase : Union[str, Any] = videoreader.get_batch(UpperCamelCase_ ).asnumpy() _lowercase : Dict = list(UpperCamelCase_ ) _lowercase : Optional[Any] = self.image_processor(UpperCamelCase_ , return_tensors=self.framework ) return model_inputs def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Union[str, Any]: '''simple docstring''' _lowercase : List[str] = self.model(**UpperCamelCase_ ) return model_outputs def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int]=5 ) -> Optional[Any]: '''simple docstring''' if top_k > self.model.config.num_labels: _lowercase : Optional[int] = self.model.config.num_labels if self.framework == "pt": _lowercase : List[Any] = model_outputs.logits.softmax(-1 )[0] _lowercase , _lowercase : Tuple = probs.topk(UpperCamelCase_ ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _lowercase : List[Any] = scores.tolist() _lowercase : Optional[int] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
4
'''simple docstring''' import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def __UpperCamelCase ( _lowercase ) -> List[Any]: _lowercase : Tuple = args.pruning_method _lowercase : int = args.threshold _lowercase : str = args.model_name_or_path.rstrip('/' ) _lowercase : Dict = args.target_model_path print(f'''Load fine-pruned model from {model_name_or_path}''' ) _lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) ) _lowercase : List[Any] = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _lowercase : Optional[int] = tensor print(f'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: _lowercase : List[str] = tensor print(f'''Copied layer {name}''' ) elif "bias" in name: _lowercase : Dict = tensor print(f'''Copied layer {name}''' ) else: if pruning_method == "magnitude": _lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase ) _lowercase : Optional[Any] = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue _lowercase : Optional[Any] = name[:-6] _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores'''] _lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase ) _lowercase : str = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _lowercase : str = name[:-6] _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores'''] _lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase ) _lowercase : Optional[int] = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue _lowercase : Optional[int] = name[:-6] _lowercase : List[str] = model[f'''{prefix_}mask_scores'''] _lowercase , _lowercase : Union[str, Any] = -0.1, 1.1 _lowercase : str = torch.sigmoid(_lowercase ) _lowercase : int = s * (r - l) + l _lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 ) _lowercase : Union[str, Any] = tensor * mask print(f'''Pruned layer {name}''' ) else: raise ValueError('Unknown pruning method' ) if target_model_path is None: _lowercase : List[Any] = os.path.join( os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' ) if not os.path.isdir(_lowercase ): shutil.copytree(_lowercase, _lowercase ) print(f'''\nCreated folder {target_model_path}''' ) torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) ) print('\nPruned model saved! See you later!' 
) if __name__ == "__main__": _A : Union[str, Any] =argparse.ArgumentParser() parser.add_argument( '''--pruning_method''', choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''], type=str, required=True, help=( '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,''' ''' sigmoied_threshold = Soft movement pruning)''' ), ) parser.add_argument( '''--threshold''', type=float, required=False, help=( '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.''' '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.''' '''Not needed for `l0`''' ), ) parser.add_argument( '''--model_name_or_path''', type=str, required=True, help='''Folder containing the model that was previously fine-pruned''', ) parser.add_argument( '''--target_model_path''', default=None, type=str, required=False, help='''Folder containing the model that was previously fine-pruned''', ) _A : List[Any] =parser.parse_args() main(args)
4
1
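The video-classification pipeline in the sample above decodes only a subset of frames: it picks num_frames indices spaced frame_sampling_rate apart via np.linspace. A self-contained sketch of that index selection (the function name is an assumption; the mangled np.intaa in the sample stands for an integer dtype such as np.int64):

import numpy as np

def sample_frame_indices(num_frames: int, frame_sampling_rate: int) -> np.ndarray:
    # Evenly spaced indices from frame 0 up to the last sampled frame.
    start_idx = 0
    end_idx = num_frames * frame_sampling_rate - 1
    return np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

print(sample_frame_indices(num_frames=8, frame_sampling_rate=4))
# -> [ 0  4  8 13 17 22 26 31]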
'''simple docstring''' import os import sys import unittest _A : Tuple =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) _A : Union[str, Any] =os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') _A : Dict =os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : List[Any] ) -> str: '''simple docstring''' _lowercase : Union[str, Any] = get_test_to_tester_mapping(UpperCamelCase_ ) _lowercase : Optional[int] = get_test_to_tester_mapping(UpperCamelCase_ ) _lowercase : str = {'BertModelTest': 'BertModelTester'} _lowercase : List[Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict ) -> List[str]: '''simple docstring''' _lowercase : Union[str, Any] = get_model_to_test_mapping(UpperCamelCase_ ) _lowercase : int = get_model_to_test_mapping(UpperCamelCase_ ) _lowercase : Union[str, Any] = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } _lowercase : Optional[int] = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ ) def __UpperCAmelCase ( self : int ) -> str: '''simple docstring''' _lowercase : Dict = get_model_to_tester_mapping(UpperCamelCase_ ) _lowercase : Union[str, Any] = get_model_to_tester_mapping(UpperCamelCase_ ) _lowercase : int = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } _lowercase : Union[str, Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , 
UpperCamelCase_ ) self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ )
4
'''simple docstring''' _A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/''' def __UpperCamelCase ( _lowercase ) -> bytes: # Make sure the supplied data is a bytes-like object if not isinstance(_lowercase, _lowercase ): _lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_lowercase ) _lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data ) _lowercase : Dict = len(_lowercase ) % 6 != 0 if padding_needed: # The padding that will be added later _lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_lowercase ) % 6) else: _lowercase : Optional[int] = B'' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6], 2 )] for index in range(0, len(_lowercase ), 6 ) ).encode() + padding ) def __UpperCamelCase ( _lowercase ) -> bytes: # Make sure encoded_data is either a string or a bytes-like object if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ): _lowercase : int = ( 'argument should be a bytes-like object or ASCII string, ' f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_lowercase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_lowercase, _lowercase ): try: _lowercase : Optional[int] = encoded_data.decode('utf-8' ) except UnicodeDecodeError: raise ValueError('base64 encoded data should only contain ASCII characters' ) _lowercase : Optional[int] = encoded_data.count('=' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one _lowercase : str = encoded_data[:-padding] _lowercase : Tuple = ''.join( bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: _lowercase : Union[str, Any] = ''.join( bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data ) _lowercase : List[str] = [ int(binary_stream[index : index + 8], 2 ) for index in range(0, len(_lowercase ), 8 ) ] return bytes(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
4
1
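The Base64 row above encodes by regrouping the input's bits into 6-bit chunks. The same logic with descriptive names restored (the names are assumptions; the charset and padding rule come from the sample):

B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

def base64_encode(data: bytes) -> bytes:
    # Concatenate all bytes as a bit string, pad to a multiple of 6 bits,
    # then map each 6-bit group to its Base64 character.
    bits = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding = b""
    if len(bits) % 6 != 0:
        padding = b"=" * ((6 - len(bits) % 6) // 2)
        bits += "0" * (6 - len(bits) % 6)
    encoded = "".join(B64_CHARSET[int(bits[i : i + 6], 2)] for i in range(0, len(bits), 6))
    return encoded.encode() + padding

assert base64_encode(b"foo") == b"Zm9v"  # agrees with the stdlib's base64.b64encode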
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _A : Union[str, Any] =logging.get_logger(__name__) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = ["""input_features"""] def __init__( self : Optional[int] , UpperCamelCase_ : Union[str, Any]=80 , UpperCamelCase_ : Optional[int]=1_6000 , UpperCamelCase_ : Dict=160 , UpperCamelCase_ : Dict=30 , UpperCamelCase_ : Optional[Any]=400 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : Tuple=False , **UpperCamelCase_ : Optional[Any] , ) -> Optional[Any]: '''simple docstring''' super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : List[str] = n_fft _lowercase : List[Any] = hop_length _lowercase : Optional[Any] = chunk_length _lowercase : Optional[int] = chunk_length * sampling_rate _lowercase : List[Any] = self.n_samples // hop_length _lowercase : List[Any] = sampling_rate _lowercase : Optional[Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCamelCase_ , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : np.array ) -> np.ndarray: '''simple docstring''' _lowercase : Optional[int] = spectrogram( UpperCamelCase_ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , ) _lowercase : Union[str, Any] = log_spec[:, :-1] _lowercase : Dict = np.maximum(UpperCamelCase_ , log_spec.max() - 8.0 ) _lowercase : Tuple = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def __UpperCAmelCase ( UpperCamelCase_ : List[np.ndarray] , UpperCamelCase_ : List[np.ndarray] , UpperCamelCase_ : float = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: _lowercase : List[str] = np.array(UpperCamelCase_ , np.intaa ) _lowercase : Optional[Any] = [] for vector, length in zip(UpperCamelCase_ , attention_mask.sum(-1 ) ): _lowercase : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _lowercase : str = padding_value normed_input_values.append(UpperCamelCase_ ) else: _lowercase : Optional[int] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : Tuple , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "max_length" , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , **UpperCamelCase_ : int , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} 
was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _lowercase : List[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _lowercase : Tuple = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ): _lowercase : List[str] = np.asarray(UpperCamelCase_ , dtype=np.floataa ) elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowercase : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowercase : Tuple = [np.asarray([raw_speech] ).T] _lowercase : Dict = BatchFeature({'input_features': raw_speech} ) # convert into correct format for padding _lowercase : Any = self.pad( UpperCamelCase_ , padding=UpperCamelCase_ , max_length=max_length if max_length else self.n_samples , truncation=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: _lowercase : str = self.zero_mean_unit_var_norm( padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , ) _lowercase : Optional[Any] = np.stack(padded_inputs['input_features'] , axis=0 ) # make sure list is in array format _lowercase : str = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 ) _lowercase : List[Any] = [self._np_extract_fbank_features(UpperCamelCase_ ) for waveform in input_features[0]] if isinstance(input_features[0] , UpperCamelCase_ ): _lowercase : Any = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_features] else: _lowercase : List[str] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) _lowercase : Optional[Any] = padded_inputs['attention_mask'][:, :: self.hop_length] if return_tensors is not None: _lowercase : Optional[Any] = padded_inputs.convert_to_tensors(UpperCamelCase_ ) return padded_inputs def __UpperCAmelCase ( self : Dict ) -> Dict[str, Any]: '''simple docstring''' _lowercase : Optional[int] = copy.deepcopy(self.__dict__ ) _lowercase : Any = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
4
'''simple docstring''' def __UpperCamelCase ( _lowercase ) -> bool: return str(_lowercase ) == str(_lowercase )[::-1] def __UpperCamelCase ( _lowercase ) -> int: return int(_lowercase ) + int(str(_lowercase )[::-1] ) def __UpperCamelCase ( _lowercase = 1_0000 ) -> int: _lowercase : List[str] = [] for num in range(1, _lowercase ): _lowercase : Tuple = 0 _lowercase : Tuple = num while iterations < 50: _lowercase : Union[str, Any] = sum_reverse(_lowercase ) iterations += 1 if is_palindrome(_lowercase ): break else: lychrel_nums.append(_lowercase ) return len(_lowercase ) if __name__ == "__main__": print(F'''{solution() = }''')
4
1
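The Project Euler row above counts Lychrel candidates with a reverse-and-add loop. The core iteration, restated with descriptive names (names are assumptions):

def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]

def becomes_palindrome_within(n: int, max_iterations: int = 50) -> bool:
    # Repeatedly add a number to its digit reversal; a Lychrel candidate is
    # one that never reaches a palindrome within the iteration budget.
    for _ in range(max_iterations):
        n += int(str(n)[::-1])
        if is_palindrome(n):
            return True
    return False

assert becomes_palindrome_within(47)       # 47 + 74 = 121, a palindrome
assert not becomes_palindrome_within(196)  # classic Lychrel candidate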
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class lowerCamelCase__ : '''simple docstring''' def __init__( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=13 , UpperCamelCase_ : str=7 , UpperCamelCase_ : int=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=99 , UpperCamelCase_ : Union[str, Any]=32 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=16 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Any=None , ) -> List[str]: '''simple docstring''' _lowercase : Any = parent _lowercase : Tuple = batch_size _lowercase : List[Any] = seq_length _lowercase : Any = is_training _lowercase : List[Any] = use_token_type_ids _lowercase : Any = use_labels _lowercase : Dict = vocab_size _lowercase : Dict = hidden_size _lowercase : Optional[int] = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : str = intermediate_size _lowercase : List[Any] = hidden_act _lowercase : List[Any] = hidden_dropout_prob _lowercase : Any = attention_probs_dropout_prob _lowercase : Union[str, Any] = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : Union[str, Any] = type_sequence_label_size _lowercase : Optional[int] = initializer_range _lowercase : str = num_labels _lowercase : Tuple = num_choices _lowercase : str = scope _lowercase : List[str] = self.vocab_size - 1 def __UpperCAmelCase ( self : Tuple ) -> int: '''simple docstring''' _lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : int = None if self.use_token_type_ids: _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Optional[Any] = None _lowercase : List[str] = None _lowercase : Union[str, Any] = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : int = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : List[str] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) _lowercase : Dict = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Dict , 
*UpperCamelCase_ : str ) -> List[Any]: '''simple docstring''' _lowercase : Optional[int] = OpenAIGPTModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : str = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ ) _lowercase : List[str] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) _lowercase : List[Any] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , *UpperCamelCase_ : Optional[Any] ) -> Dict: '''simple docstring''' _lowercase : Tuple = OpenAIGPTLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : Tuple = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str , *UpperCamelCase_ : Union[str, Any] ) -> Dict: '''simple docstring''' _lowercase : int = OpenAIGPTDoubleHeadsModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : Any = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , *UpperCamelCase_ : List[str] ) -> Any: '''simple docstring''' _lowercase : Dict = self.num_labels _lowercase : Optional[int] = OpenAIGPTForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : Any = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : Dict = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Tuple = config_and_inputs _lowercase : str = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class lowerCamelCase__ ( A , A , A , unittest.TestCase ): '''simple docstring''' A_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A_ = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __UpperCAmelCase ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ) -> Union[str, Any]: 
'''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ) -> List[Any]: '''simple docstring''' _lowercase : List[str] = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": _lowercase : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ , ) _lowercase : Optional[int] = inputs_dict['labels'] _lowercase : Optional[Any] = inputs_dict['labels'] _lowercase : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCamelCase_ , ) _lowercase : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) return inputs_dict def __UpperCAmelCase ( self : List[str] ) -> Any: '''simple docstring''' _lowercase : Optional[Any] = OpenAIGPTModelTester(self ) _lowercase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 ) def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*UpperCamelCase_ ) def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: '''simple docstring''' _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ ) def __UpperCAmelCase ( self : int ) -> Union[str, Any]: '''simple docstring''' _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict ) -> Any: '''simple docstring''' _lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCamelCase_ ) @slow def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Optional[int] = OpenAIGPTModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : str ) -> List[Any]: '''simple docstring''' _lowercase : Any = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(UpperCamelCase_ ) _lowercase : Optional[int] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=UpperCamelCase_ ) # the president is _lowercase : Optional[int] = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 4_0477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the _lowercase : Optional[Any] = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
4
'''simple docstring''' import argparse from collections import defaultdict def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int: _lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}''' done_test[_id] += 1 with open(_lowercase, 'r' ) as f: _lowercase : Optional[int] = f.readlines() _lowercase : Dict = f'''class {class_name}(''' _lowercase : List[Any] = f'''{4 * " "}def {test_name}(''' _lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}''' _lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}''' _lowercase : Dict = False _lowercase : str = False _lowercase : List[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = 0 _lowercase : Tuple = 0 _lowercase : Optional[int] = [] for line in lines: if line.startswith(_lowercase ): _lowercase : int = True elif in_class and line.startswith(_lowercase ): _lowercase : List[Any] = True elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )): _lowercase : str = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : List[Any] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(f'''{spaces * " "}{correct_line}''' ) _lowercase : Any = False else: new_lines.append(_lowercase ) with open(_lowercase, 'w' ) as f: for line in new_lines: f.write(_lowercase ) def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]: if fail is not None: with open(_lowercase, 'r' ) as f: _lowercase : Any = {l.strip() for l in f.readlines()} else: _lowercase : str = None with open(_lowercase, 'r' ) as f: _lowercase : str = f.readlines() _lowercase : Union[str, Any] = defaultdict(_lowercase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) if __name__ == "__main__": _A : str =argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) _A : Union[str, Any] =parser.parse_args() main(args.correct_filename, args.fail_filename)
4
1
'''simple docstring''' import math import random from typing import Any from .hill_climbing import SearchProblem def __UpperCamelCase ( _lowercase, _lowercase = True, _lowercase = math.inf, _lowercase = -math.inf, _lowercase = math.inf, _lowercase = -math.inf, _lowercase = False, _lowercase = 100, _lowercase = 0.0_1, _lowercase = 1, ) -> Any: _lowercase : Dict = False _lowercase : Optional[Any] = search_prob _lowercase : Any = start_temperate _lowercase : int = [] _lowercase : List[Any] = 0 _lowercase : List[Any] = None while not search_end: _lowercase : int = current_state.score() if best_state is None or current_score > best_state.score(): _lowercase : int = current_state scores.append(_lowercase ) iterations += 1 _lowercase : Tuple = None _lowercase : List[str] = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to _lowercase : Optional[Any] = random.randint(0, len(_lowercase ) - 1 ) # picking a random neighbor _lowercase : List[Any] = neighbors.pop(_lowercase ) _lowercase : List[str] = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: _lowercase : Union[str, Any] = change * -1 # in case we are finding minimum if change > 0: # improves the solution _lowercase : str = picked_neighbor else: _lowercase : str = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability _lowercase : List[Any] = picked_neighbor _lowercase : Any = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor _lowercase : Dict = True else: _lowercase : int = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(_lowercase ), _lowercase ) plt.xlabel('Iterations' ) plt.ylabel('Function values' ) plt.show() return best_state if __name__ == "__main__": def __UpperCamelCase ( _lowercase, _lowercase ) -> Dict: return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) _A : Dict =SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa) _A : Optional[Any] =simulated_annealing( prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True ) print( '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) # starting the problem with initial coordinates (12, 47) _A : Union[str, Any] =SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa) _A : Optional[Any] =simulated_annealing( prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True ) print( '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]: return (3 * x**2) - (6 * y) _A : Any =SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _A : Union[str, Any] =simulated_annealing(prob, find_max=False, visualization=True) print( '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' F'''{local_min.score()}''' ) _A : List[str] =SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _A : Any =simulated_annealing(prob, find_max=True, 
visualization=True) print( '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' F'''{local_min.score()}''' )
4
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax _A : Optional[int] =logging.get_logger(__name__) @add_end_docstrings(A ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int: '''simple docstring''' super().__init__(**UpperCamelCase_ ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]: '''simple docstring''' return super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = {} if "candidate_labels" in kwargs: _lowercase : Union[str, Any] = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: _lowercase : int = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]: '''simple docstring''' _lowercase : Dict = load_image(UpperCamelCase_ ) _lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) _lowercase : Optional[Any] = candidate_labels _lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels] _lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ ) _lowercase : Any = [text_inputs] return inputs def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = model_inputs.pop('candidate_labels' ) _lowercase : List[str] = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , UpperCamelCase_ ): _lowercase : Optional[int] = text_inputs[0] else: # Batching case. 
_lowercase : List[str] = text_inputs[0][0] _lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Optional[Any] = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = model_outputs.pop('candidate_labels' ) _lowercase : Optional[int] = model_outputs['logits'][0] if self.framework == "pt": _lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 ) _lowercase : Tuple = probs.tolist() if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : List[Any] = [scores] elif self.framework == "tf": _lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 ) _lowercase : List[Any] = probs.numpy().tolist() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _lowercase : List[Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda x : -x[0] ) ] return result
4
1
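The annealing loop in the sample above accepts a worse neighbor with probability e**(change / temperature), so exploration shrinks as the temperature decays. The acceptance rule in isolation (a minimal sketch, not the sample's full search loop):

import math
import random

def accept(change: float, temperature: float) -> bool:
    if change > 0:  # improving move: always take it
        return True
    return random.random() < math.e ** (change / temperature)

random.seed(0)
print(accept(-1.0, temperature=100.0))  # hot: worse moves usually accepted (True here)
print(accept(-1.0, temperature=0.01))   # cold: effectively greedy (False)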
'''simple docstring''' import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast _A : Dict =datasets.utils.logging.get_logger(__name__) @dataclass class lowerCamelCase__ ( datasets.BuilderConfig ): '''simple docstring''' A_ = 1_0000 A_ = None A_ = None class lowerCamelCase__ ( datasets.ArrowBasedBuilder ): '''simple docstring''' A_ = ParquetConfig def __UpperCAmelCase ( self : Optional[Any] ) -> int: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : Optional[Any] ) -> List[str]: '''simple docstring''' if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) _lowercase : Dict = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCamelCase_ , (str, list, tuple) ): _lowercase : Any = data_files if isinstance(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : Optional[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _lowercase : Any = [dl_manager.iter_files(UpperCamelCase_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] _lowercase : Tuple = [] for split_name, files in data_files.items(): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _lowercase : Union[str, Any] = [dl_manager.iter_files(UpperCamelCase_ ) for file in files] # Infer features if they are stored in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(UpperCamelCase_ ): with open(UpperCamelCase_ , 'rb' ) as f: _lowercase : int = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase_ ) ) break splits.append(datasets.SplitGenerator(name=UpperCamelCase_ , gen_kwargs={'files': files} ) ) return splits def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : pa.Table ) -> pa.Table: '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example _lowercase : Dict = table_cast(UpperCamelCase_ , self.info.features.arrow_schema ) return pa_table def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase_ ) ): with open(UpperCamelCase_ , 'rb' ) as f: _lowercase : Optional[Any] = pq.ParquetFile(UpperCamelCase_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): _lowercase : Union[str, Any] = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 
1).to_pydict()) for i in range(pa_table.num_rows))) yield F'''{file_idx}_{batch_idx}''', self._cast_table(UpperCamelCase_ ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(UpperCamelCase_ )}: {e}''' ) raise
4
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def __UpperCamelCase ( _lowercase ) -> None: _lowercase , _lowercase : List[Any] = analyze_text(_lowercase ) _lowercase : Any = list(' ' + ascii_lowercase ) # what is our total sum of probabilities. _lowercase : Union[str, Any] = sum(single_char_strings.values() ) # one length string _lowercase : Union[str, Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: _lowercase : Any = single_char_strings[ch] _lowercase : int = my_str / all_sum my_fir_sum += prob * math.loga(_lowercase ) # entropy formula. # print entropy print(f'''{round(-1 * my_fir_sum ):.1f}''' ) # two len string _lowercase : str = sum(two_char_strings.values() ) _lowercase : str = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for chb in my_alphas: _lowercase : Optional[Any] = cha + chb if sequence in two_char_strings: _lowercase : int = two_char_strings[sequence] _lowercase : Optional[int] = int(_lowercase ) / all_sum my_sec_sum += prob * math.loga(_lowercase ) # print second entropy print(f'''{round(-1 * my_sec_sum ):.1f}''' ) # print the difference between them print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' ) def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]: _lowercase : Optional[Any] = Counter() # type: ignore _lowercase : List[Any] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0, len(_lowercase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def __UpperCamelCase ( ) -> List[Any]: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
4
1
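The entropy row above estimates first-order entropy as H = -sum(p * log2(p)) over character frequencies (math.loga in the sample is a mangled math.log2). A compact, readable version (names are assumptions):

import math
from collections import Counter

def char_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

print(f"{char_entropy('abab'):.1f}")  # 1.0: two equiprobable symbols carry 1 bit each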
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _A : Any ={ '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =['''VivitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =[ '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VivitModel''', '''VivitPreTrainedModel''', '''VivitForVideoClassification''', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys _A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
'''simple docstring''' import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : int ) -> Union[str, Any]: '''simple docstring''' _lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' ) _lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' ) _lowercase : List[Any] = 'The dog is cute and lives in the garden house' _lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] ) _lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim _lowercase : Tuple = jnp.array( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) _lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state'] self.assertEqual(output.shape , UpperCamelCase_ ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
4
1
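The vivit __init__ row above wires everything through _LazyModule so heavy submodules are imported only on first attribute access. A simplified, self-contained sketch of that pattern (this is not the library's _LazyModule; the demo uses the stdlib json package):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._class_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Import the owning submodule lazily, then cache the resolved attribute.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value

lazy_json = LazyModule("json", {"decoder": ["JSONDecoder"]})
print(lazy_json.JSONDecoder)  # json.decoder is imported only at this point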
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax _A : Optional[int] =logging.get_logger(__name__) @add_end_docstrings(A ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int: '''simple docstring''' super().__init__(**UpperCamelCase_ ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]: '''simple docstring''' return super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = {} if "candidate_labels" in kwargs: _lowercase : Union[str, Any] = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: _lowercase : int = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]: '''simple docstring''' _lowercase : Dict = load_image(UpperCamelCase_ ) _lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) _lowercase : Optional[Any] = candidate_labels _lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels] _lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ ) _lowercase : Any = [text_inputs] return inputs def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = model_inputs.pop('candidate_labels' ) _lowercase : List[str] = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , UpperCamelCase_ ): _lowercase : Optional[int] = text_inputs[0] else: # Batching case. 
_lowercase : List[str] = text_inputs[0][0] _lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Optional[Any] = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = model_outputs.pop('candidate_labels' ) _lowercase : Optional[int] = model_outputs['logits'][0] if self.framework == "pt": _lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 ) _lowercase : Tuple = probs.tolist() if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : List[Any] = [scores] elif self.framework == "tf": _lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 ) _lowercase : List[Any] = probs.numpy().tolist() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _lowercase : List[Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda x : -x[0] ) ] return result
4
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _A : int =logging.get_logger(__name__) _A : Union[str, Any] ={ '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''', } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip_vision_model""" def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any: '''simple docstring''' super().__init__(**UpperCamelCase_ ) _lowercase : Optional[Any] = hidden_size _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = num_hidden_layers _lowercase : str = num_attention_heads _lowercase : Tuple = patch_size _lowercase : Dict = image_size _lowercase : Optional[int] = initializer_range _lowercase : List[Any] = attention_dropout _lowercase : int = layer_norm_eps _lowercase : Optional[int] = hidden_act _lowercase : str = qkv_bias @classmethod def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase_ ) _lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": _lowercase : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip_qformer""" def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Dict = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : List[Any] = num_attention_heads _lowercase : Optional[int] = hidden_act _lowercase : Union[str, Any] = intermediate_size _lowercase : List[Any] = hidden_dropout_prob _lowercase : Dict = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : Optional[int] = initializer_range _lowercase : Tuple = layer_norm_eps _lowercase : List[str] = position_embedding_type _lowercase : str = cross_attention_frequency _lowercase : int = encoder_hidden_size @classmethod def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase_ ) _lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": _lowercase : Optional[int] = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip""" A_ = True def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' super().__init__(**UpperCamelCase_ ) if vision_config is None: _lowercase : Any = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: _lowercase : List[Any] = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: _lowercase : List[Any] = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) _lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ ) _lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ ) _lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt' _lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ ) _lowercase : str = self.text_config.tie_word_embeddings _lowercase : int = self.text_config.is_encoder_decoder _lowercase : Tuple = num_query_tokens _lowercase : str = self.vision_config.hidden_size _lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowercase : List[Any] = 1.0 _lowercase : int = 0.02 @classmethod def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , ) def __UpperCAmelCase ( self : Dict ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = copy.deepcopy(self.__dict__ ) _lowercase : Optional[int] = self.vision_config.to_dict() _lowercase : Optional[Any] = self.qformer_config.to_dict() _lowercase : Tuple = self.text_config.to_dict() _lowercase : Dict = self.__class__.model_type return output
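A hedged sketch of how the three configuration classes above compose, using the classmethod defined near the bottom; all values are library defaults or illustrative, not tied to a released checkpoint:

from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

vision = InstructBlipVisionConfig()  # defaults mirror the signature above
qformer = InstructBlipQFormerConfig(encoder_hidden_size=vision.hidden_size)
text = OPTConfig()  # OPT is the fallback text config named in the log message above
config = InstructBlipConfig.from_vision_qformer_text_configs(
    vision, qformer, text, num_query_tokens=32
)
print(sorted(config.to_dict().keys()))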
4
1
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _A : Optional[int] =logging.get_logger(__name__) _A : Dict =[ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] _A : str =[ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def __UpperCamelCase ( _lowercase ) -> Union[str, Any]: _lowercase : str = torch.load(_lowercase, map_location='cpu' ) return sd def __UpperCamelCase ( _lowercase, _lowercase, _lowercase=rename_keys_prefix ) -> str: _lowercase : Dict = OrderedDict() _lowercase : Optional[Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue _lowercase : Any = key for name_pair in rename_keys_prefix: _lowercase : Any = new_key.replace(name_pair[0], name_pair[1] ) _lowercase : Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately _lowercase : List[str] = new_d['cls.predictions.bias'] return new_d @torch.no_grad() def __UpperCamelCase ( _lowercase, _lowercase ) -> Tuple: assert ( checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: _lowercase : Union[str, Any] = 'pretraining' if "vcr" in checkpoint_path: _lowercase : Optional[int] = {'visual_embedding_dim': 512} elif "vqa_advanced" in checkpoint_path: _lowercase : int = {'visual_embedding_dim': 2048} elif "vqa" in checkpoint_path: _lowercase : Dict = {'visual_embedding_dim': 2048} elif "nlvr" in checkpoint_path: _lowercase : Any = {'visual_embedding_dim': 1024} else: raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: _lowercase : Optional[Any] = {'visual_embedding_dim': 512} _lowercase : Optional[Any] = 'multichoice' elif "vqa_advanced" in checkpoint_path: _lowercase : List[str] = {'visual_embedding_dim': 2048} _lowercase : Optional[Any] = 'vqa_advanced' elif "vqa" in checkpoint_path: _lowercase : List[Any] = {'visual_embedding_dim': 2048, 'num_labels': 3129} _lowercase : Any = 'vqa' elif "nlvr" in checkpoint_path: _lowercase : Dict = { 'visual_embedding_dim': 1024, 'num_labels': 2, } _lowercase : Dict = 'nlvr' _lowercase : Optional[int] = VisualBertConfig(**_lowercase ) # Load State Dict _lowercase : Tuple = load_state_dict(_lowercase ) _lowercase : List[Any] = get_new_dict(_lowercase, _lowercase ) if model_type == "pretraining": _lowercase : Optional[int] = VisualBertForPreTraining(_lowercase ) elif model_type == "vqa": _lowercase : int = VisualBertForQuestionAnswering(_lowercase ) elif model_type == "nlvr": _lowercase : List[Any] = VisualBertForVisualReasoning(_lowercase ) elif model_type == 
"multichoice": _lowercase : Dict = VisualBertForMultipleChoice(_lowercase ) model.load_state_dict(_lowercase ) # Save Checkpoints Path(_lowercase ).mkdir(exist_ok=_lowercase ) model.save_pretrained(_lowercase ) if __name__ == "__main__": _A : Union[str, Any] =argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') _A : int =parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
4
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): _A : List[str] ='''pt''' elif is_tf_available(): _A : Tuple ='''tf''' else: _A : Optional[int] ='''jax''' class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' A_ = ByTaTokenizer A_ = False def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' super().setUp() _lowercase : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __UpperCAmelCase ( self : int ) -> int: '''simple docstring''' return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer: '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]: '''simple docstring''' _lowercase : Dict = [] for i in range(len(UpperCamelCase_ ) ): try: _lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) ) _lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) ) if max_length is not None and len(UpperCamelCase_ ) > max_length: _lowercase : List[Any] = toks[:max_length] if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0: while len(UpperCamelCase_ ) < min_length: _lowercase : Tuple = toks + toks # toks_str = [t[1] for t in toks] _lowercase : Dict = [t[0] for t in toks] # Ensure consistency _lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) if " " not in output_txt and len(UpperCamelCase_ ) > 1: _lowercase : Any = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ ) ) if with_prefix_space: _lowercase : Union[str, Any] = ' ' + output_txt _lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) return output_txt, output_ids def __UpperCAmelCase ( self : Optional[int] ) -> int: '''simple docstring''' _lowercase : List[str] = self.ta_base_tokenizer _lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) _lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' _lowercase : Optional[int] = self.ta_base_tokenizer _lowercase : Tuple = 'Unicode โ‚ฌ.' 
_lowercase : List[Any] = tokenizer(UpperCamelCase_ ) _lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , UpperCamelCase_ ) # decoding _lowercase : List[str] = tokenizer.decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , 'Unicode โ‚ฌ.</s>' ) _lowercase : Any = tokenizer('e รจ รฉ รช รซ' ) _lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , UpperCamelCase_ ) # decoding _lowercase : Tuple = tokenizer.decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , 'e รจ รฉ รช รซ</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e รจ รฉ รช รซ' ) ) , 'e รจ รฉ รช รซ</s>' ) def __UpperCAmelCase ( self : Tuple ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = self.ta_base_tokenizer _lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) if FRAMEWORK != "jax": _lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] ) else: _lowercase : List[str] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __UpperCAmelCase ( self : Optional[int] ) -> str: '''simple docstring''' _lowercase : Union[str, Any] = self.ta_base_tokenizer _lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , UpperCamelCase_ ) self.assertIn('attention_mask' , UpperCamelCase_ ) self.assertNotIn('decoder_input_ids' , UpperCamelCase_ ) self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ ) def __UpperCAmelCase ( self : Any ) -> int: '''simple docstring''' _lowercase : Tuple = self.ta_base_tokenizer _lowercase : Optional[Any] = [ 'Summary of the text.', 'Another summary.', ] _lowercase : str = tokenizer( text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __UpperCAmelCase ( self : Dict ) -> Tuple: '''simple docstring''' _lowercase : str = self.ta_base_tokenizer _lowercase : str = ['A long paragraph for summarization. </s>'] _lowercase : Optional[int] = ['Summary of the text. 
</s>'] # fmt: off _lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] ) self.assertEqual(UpperCamelCase_ , batch['labels'][0] ) def __UpperCAmelCase ( self : List[str] ) -> int: '''simple docstring''' _lowercase : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _lowercase : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : List[Any] = tempfile.mkdtemp() _lowercase : Any = ' He is very happy, UNwant\u00E9d,running' _lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) _lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ ) _lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) shutil.rmtree(UpperCamelCase_ ) _lowercase : str = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : Dict = tempfile.mkdtemp() _lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) _lowercase : Optional[int] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) _lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ ) _lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> Tuple: '''simple docstring''' _lowercase : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _lowercase : int = json.load(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 
encoding='utf-8' ) as json_file: _lowercase : Tuple = json.load(UpperCamelCase_ ) _lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )] _lowercase : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] _lowercase : int = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowercase : Optional[Any] = tokenizer_class.from_pretrained( UpperCamelCase_ , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )] _lowercase : Tuple = tokenizer_class.from_pretrained( UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __UpperCAmelCase ( self : List[str] ) -> str: '''simple docstring''' _lowercase : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase_ ) _lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __UpperCAmelCase ( self : Optional[int] ) -> int: '''simple docstring''' pass def __UpperCAmelCase ( self : str ) -> Tuple: '''simple docstring''' pass def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: '''simple docstring''' pass def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' pass def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] _lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] ) -> str: '''simple docstring''' _lowercase : Dict = self.get_tokenizers() for tokenizer in tokenizers: with 
self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowercase : Optional[int] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] _lowercase : Optional[int] = 0 _lowercase : int = tokenizer.convert_ids_to_tokens( UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) for attr in attributes_list: setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ ) setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ ) setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] ) setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
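The byte-level scheme the tests above rely on can be checked directly: ByT5 ids are the raw UTF-8 bytes offset by three special tokens (pad=0, eos=1, unk=2). A small sketch, assuming the public google/byt5-small checkpoint is reachable:

from transformers import ByT5Tokenizer

tok = ByT5Tokenizer.from_pretrained("google/byt5-small")
ids = tok("hi").input_ids
print(ids)  # [107, 108, 1]: the UTF-8 bytes of "hi" shifted by 3, then </s> (id 1)
assert ids[:-1] == [b + 3 for b in "hi".encode("utf-8")]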
4
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _A : Any ={'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Optional[Any] =['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : int =['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys _A : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
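The _LazyModule indirection above defers the heavy submodule imports until a symbol is first touched. A minimal standalone sketch of the idea (simplified; the real implementation subclasses ModuleType and patches sys.modules):

import importlib

class LazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, name):
        # only reached for names not set in __init__, i.e. the lazy symbols
        if name not in self._symbol_to_module:
            raise AttributeError(name)
        module = importlib.import_module("." + self._symbol_to_module[name], self._package)
        return getattr(module, name)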
4
'''simple docstring''' _A : Dict =''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' _A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}] _A : Dict ={ '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
4
1
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : int = filter(lambda _lowercase : p.requires_grad, model.parameters() ) _lowercase : Optional[Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params _A : List[Any] =logging.getLogger(__name__) def __UpperCamelCase ( _lowercase, _lowercase ) -> Any: if metric == "rouge2": _lowercase : Optional[Any] = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": _lowercase : List[str] = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": _lowercase : List[str] = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": _lowercase : Dict = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' ' function.' ) _lowercase : Any = ModelCheckpoint( dirpath=_lowercase, filename=_lowercase, monitor=f'''val_{metric}''', mode='max', save_top_k=1, every_n_epochs=1, ) return checkpoint_callback def __UpperCamelCase ( _lowercase, _lowercase ) -> Dict: return EarlyStopping( monitor=f'''val_{metric}''', mode='min' if 'loss' in metric else 'max', patience=_lowercase, verbose=_lowercase, ) class lowerCamelCase__ ( pl.Callback ): '''simple docstring''' def __UpperCAmelCase ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Any ) -> List[str]: '''simple docstring''' _lowercase : Dict = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(UpperCamelCase_ ) @rank_zero_only def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : pl.Trainer , UpperCamelCase_ : pl.LightningModule , UpperCamelCase_ : str , UpperCamelCase_ : Dict=True ) -> None: '''simple docstring''' logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) _lowercase : Tuple = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results _lowercase : Union[str, Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": _lowercase : Optional[Any] = od / 'test_results.txt' _lowercase : List[str] = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_lowercase : Union[str, Any] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt''' _lowercase : List[str] = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=UpperCamelCase_ ) generations_file.parent.mkdir(exist_ok=UpperCamelCase_ ) with open(UpperCamelCase_ , 'a+' ) as writer: for key in sorted(UpperCamelCase_ ): if key in ["log", "progress_bar", "preds"]: continue _lowercase : Tuple = metrics[key] if isinstance(UpperCamelCase_ , torch.Tensor ): _lowercase : Any = val.item() _lowercase : Optional[int] = F'''{key}: {val:.6f}\n''' writer.write(UpperCamelCase_ ) if not save_generations: return if "preds" in metrics: _lowercase : Tuple = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(UpperCamelCase_ ) @rank_zero_only def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] ) -> List[Any]: '''simple docstring''' try: _lowercase : List[Any] = pl_module.model.model.num_parameters() except AttributeError: _lowercase : Dict = pl_module.model.num_parameters() _lowercase : Dict = count_trainable_parameters(UpperCamelCase_ ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : pl.Trainer , UpperCamelCase_ : pl.LightningModule ) -> Tuple: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(UpperCamelCase_ , UpperCamelCase_ , 'test' ) @rank_zero_only def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : pl.Trainer , UpperCamelCase_ : List[str] ) -> Dict: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
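A hedged sketch of wiring these helpers into a Lightning Trainer. The readable names (Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback) come from the upstream example script and are assumptions here, since this dump obfuscates them:

import pytorch_lightning as pl

checkpoint_cb = get_checkpoint_callback(output_dir="outputs", metric="rouge2")
early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
trainer = pl.Trainer(
    max_epochs=3,
    callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb],
)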
4
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : int = torch.exp(_lowercase ) _lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i) _lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i) return torch.log(_lowercase ) - B / A class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]: '''simple docstring''' super().__init__() _lowercase : int = config.output_attentions _lowercase : int = config.output_hidden_states _lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] ) _lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] ) _lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )] def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int: '''simple docstring''' if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int): for i in range(len(self.early_exit_entropy ) ): _lowercase : Optional[Any] = x else: _lowercase : Optional[int] = x def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict: '''simple docstring''' _lowercase : Optional[int] = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]: '''simple docstring''' _lowercase : int = () _lowercase : List[Any] = () _lowercase : Tuple = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: _lowercase : Optional[int] = all_hidden_states + (hidden_states,) _lowercase : str = layer_module( UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : List[str] = layer_outputs[0] if self.output_attentions: _lowercase : Tuple = all_attentions + (layer_outputs[1],) _lowercase : Optional[int] = (hidden_states,) if self.output_hidden_states: _lowercase : str = current_outputs + (all_hidden_states,) if self.output_attentions: _lowercase : Optional[int] = current_outputs + (all_attentions,) _lowercase : List[Any] = self.highway[i](UpperCamelCase_ ) # logits, pooled_output if not self.training: _lowercase : Dict = highway_exit[0] _lowercase : Tuple = entropy(UpperCamelCase_ ) _lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy _lowercase : str = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: _lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(UpperCamelCase_ , i + 1 ) else: _lowercase : Optional[int] = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: _lowercase : str = all_hidden_states + (hidden_states,) _lowercase : Optional[Any] = (hidden_states,) if self.output_hidden_states: _lowercase : Dict = outputs + (all_hidden_states,) if 
self.output_attentions: _lowercase : Optional[Any] = outputs + (all_attentions,) _lowercase : Optional[int] = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( """The Bert Model transformer with early exiting (DeeBERT). """ , A , ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' super().__init__(UpperCamelCase_ ) _lowercase : int = config _lowercase : int = BertEmbeddings(UpperCamelCase_ ) _lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ ) _lowercase : Any = BertPooler(UpperCamelCase_ ) self.init_weights() def __UpperCAmelCase ( self : int ) -> Union[str, Any]: '''simple docstring''' self.encoder.init_highway_pooler(self.pooler ) def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' return self.embeddings.word_embeddings def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any: '''simple docstring''' _lowercase : Optional[Any] = value def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]: '''simple docstring''' for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]: '''simple docstring''' if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: _lowercase : Any = input_ids.size() elif inputs_embeds is not None: _lowercase : Any = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) _lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: _lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if encoder_attention_mask is None: _lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: _lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: _lowercase : int = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: _lowercase : int = encoder_attention_mask[:, None, None, :] _lowercase : str = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility _lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0 # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] _lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) _lowercase : Dict = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) _lowercase : List[Any] = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) _lowercase : int = encoder_outputs[0] _lowercase : str = self.pooler(UpperCamelCase_ ) _lowercase : List[Any] = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]: '''simple docstring''' _lowercase : Any = message _lowercase : Dict = exit_layer # start from 1! class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict: '''simple docstring''' super().__init__() _lowercase : Optional[Any] = BertPooler(UpperCamelCase_ ) _lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob ) _lowercase : int = nn.Linear(config.hidden_size , config.num_labels ) def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]: '''simple docstring''' _lowercase : str = encoder_outputs[0] _lowercase : int = self.pooler(UpperCamelCase_ ) # "return" pooler_output # BertModel _lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification _lowercase : Dict = bmodel_output[1] _lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ ) _lowercase : str = self.classifier(UpperCamelCase_ ) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. 
""" , A , ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]: '''simple docstring''' super().__init__(UpperCamelCase_ ) _lowercase : Dict = config.num_labels _lowercase : Any = config.num_hidden_layers _lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ ) _lowercase : Any = nn.Dropout(config.hidden_dropout_prob ) _lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple: '''simple docstring''' _lowercase : Union[str, Any] = self.num_layers try: _lowercase : Tuple = self.bert( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits _lowercase : List[Any] = outputs[1] _lowercase : int = self.dropout(UpperCamelCase_ ) _lowercase : Optional[int] = self.classifier(UpperCamelCase_ ) _lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowercase : Union[str, Any] = e.message _lowercase : Any = e.exit_layer _lowercase : Optional[int] = outputs[0] if not self.training: _lowercase : Union[str, Any] = entropy(UpperCamelCase_ ) _lowercase : Tuple = [] _lowercase : Tuple = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowercase : Tuple = MSELoss() _lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _lowercase : Union[str, Any] = CrossEntropyLoss() _lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _lowercase : Optional[Any] = [] for highway_exit in outputs[-1]: _lowercase : Optional[Any] = highway_exit[0] if not self.training: highway_logits_all.append(UpperCamelCase_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _lowercase : Union[str, Any] = MSELoss() _lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _lowercase : Dict = CrossEntropyLoss() _lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(UpperCamelCase_ ) if train_highway: _lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _lowercase : Optional[Any] = (loss,) + outputs if not self.training: _lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _lowercase : Dict = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
4
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase__ ( A , A , A , unittest.TestCase ): '''simple docstring''' A_ = AltDiffusionPipeline A_ = TEXT_TO_IMAGE_PARAMS A_ = TEXT_TO_IMAGE_BATCH_PARAMS A_ = TEXT_TO_IMAGE_IMAGE_PARAMS A_ = TEXT_TO_IMAGE_IMAGE_PARAMS def __UpperCAmelCase ( self : int ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) _lowercase : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) _lowercase : Optional[Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , ) torch.manual_seed(0 ) _lowercase : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) _lowercase : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , ) _lowercase : str = CLIPTextModel(UpperCamelCase_ ) _lowercase : str = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) _lowercase : Union[str, Any] = 77 _lowercase : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any]=0 ) -> Optional[int]: '''simple docstring''' if str(UpperCamelCase_ ).startswith('mps' ): _lowercase : Union[str, Any] = torch.manual_seed(UpperCamelCase_ ) else: _lowercase : Any = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) _lowercase : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __UpperCAmelCase ( self : List[str] ) -> int: '''simple docstring''' 
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def __UpperCAmelCase ( self : Optional[int] ) -> Any: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __UpperCAmelCase ( self : Optional[Any] ) -> Dict: '''simple docstring''' _lowercase : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowercase : List[Any] = self.get_dummy_components() torch.manual_seed(0 ) _lowercase : List[Any] = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder _lowercase : Optional[Any] = RobertaSeriesModelWithTransformation(UpperCamelCase_ ) _lowercase : str = text_encoder _lowercase : str = AltDiffusionPipeline(**UpperCamelCase_ ) _lowercase : List[Any] = alt_pipe.to(UpperCamelCase_ ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) _lowercase : Any = self.get_dummy_inputs(UpperCamelCase_ ) _lowercase : int = 'A photo of an astronaut' _lowercase : Optional[int] = alt_pipe(**UpperCamelCase_ ) _lowercase : List[str] = output.images _lowercase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowercase : Union[str, Any] = np.array( [0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowercase : List[Any] = self.get_dummy_components() _lowercase : int = PNDMScheduler(skip_prk_steps=UpperCamelCase_ ) torch.manual_seed(0 ) _lowercase : Dict = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder _lowercase : Optional[int] = RobertaSeriesModelWithTransformation(UpperCamelCase_ ) _lowercase : Tuple = text_encoder _lowercase : str = AltDiffusionPipeline(**UpperCamelCase_ ) _lowercase : List[Any] = alt_pipe.to(UpperCamelCase_ ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) _lowercase : Dict = self.get_dummy_inputs(UpperCamelCase_ ) _lowercase : List[str] = alt_pipe(**UpperCamelCase_ ) _lowercase : Any = output.images _lowercase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowercase : str = np.array( [0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : int = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=UpperCamelCase_ ) _lowercase : List[Any] = alt_pipe.to(UpperCamelCase_ ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) _lowercase : Any = 'A painting of a squirrel eating a burger' _lowercase : int = torch.manual_seed(0 ) _lowercase : Dict = alt_pipe([prompt] , generator=UpperCamelCase_ , 
guidance_scale=6.0 , num_inference_steps=20 , output_type='np' ) _lowercase : List[Any] = output.images _lowercase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowercase : Optional[int] = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self : List[str] ) -> int: '''simple docstring''' _lowercase : Union[str, Any] = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' ) _lowercase : Optional[Any] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ ) _lowercase : str = alt_pipe.to(UpperCamelCase_ ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) _lowercase : int = 'A painting of a squirrel eating a burger' _lowercase : Any = torch.manual_seed(0 ) _lowercase : Optional[Any] = alt_pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='numpy' ) _lowercase : List[Any] = output.images _lowercase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowercase : str = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
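For reference, a hedged end-to-end sketch mirroring the slow test above; it assumes a CUDA device and downloads the public BAAI/AltDiffusion weights:

import torch
from diffusers import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
pipe = pipe.to("cuda")
result = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    guidance_scale=6.0,
    num_inference_steps=20,
)
result.images[0].save("squirrel.png")  # images[0] is a PIL.Image by default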
4
'''simple docstring''' import unittest from knapsack import greedy_knapsack as kp class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : int ) -> Any: '''simple docstring''' _lowercase : List[Any] = [10, 20, 30, 40, 50, 60] _lowercase : Tuple = [2, 4, 6, 8, 10, 12] _lowercase : Optional[Any] = 100 self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 ) def __UpperCAmelCase ( self : int ) -> int: '''simple docstring''' self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' ) def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertRaisesRegex(ValueError , 'Weight can not be negative.' ) def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' self.assertRaisesRegex(ValueError , 'Profit can not be negative.' ) def __UpperCAmelCase ( self : int ) -> List[str]: '''simple docstring''' self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' ) def __UpperCAmelCase ( self : int ) -> List[Any]: '''simple docstring''' self.assertRaisesRegex( ValueError , 'The length of profit and weight must be same.' ) if __name__ == "__main__": unittest.main()
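A minimal standalone sketch of the greedy fractional knapsack these tests exercise (not necessarily the exact upstream implementation; the error strings deliberately mirror what the regex assertions above expect, grammar included):

def calc_profit(profit, weight, max_weight):
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    # take items by profit/weight ratio, splitting the last one that fits
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        take = min(w, capacity)
        total += p * take / w
        capacity -= take
        if capacity == 0:
            break
    return total

print(calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100))  # 210.0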
4
1
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def __UpperCamelCase ( _lowercase ) -> Dict: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E_00 and cp <= 0x9F_FF) or (cp >= 0x34_00 and cp <= 0x4D_BF) # or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) # or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) # or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) # or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) # or (cp >= 0xF9_00 and cp <= 0xFA_FF) or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) # ): # return True return False def __UpperCamelCase ( _lowercase ) -> Dict: # word like '180' or '่บซ้ซ˜' or '็ฅž' for char in word: _lowercase : Union[str, Any] = ord(_lowercase ) if not _is_chinese_char(_lowercase ): return 0 return 1 def __UpperCamelCase ( _lowercase ) -> List[str]: _lowercase : Optional[Any] = set() for token in tokens: _lowercase : Any = len(_lowercase ) > 1 and is_chinese(_lowercase ) if chinese_word: word_set.add(_lowercase ) _lowercase : List[Any] = list(_lowercase ) return word_list def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]: if not chinese_word_set: return bert_tokens _lowercase : str = max([len(_lowercase ) for w in chinese_word_set] ) _lowercase : int = bert_tokens _lowercase , _lowercase : List[Any] = 0, len(_lowercase ) while start < end: _lowercase : Dict = True if is_chinese(bert_word[start] ): _lowercase : int = min(end - start, _lowercase ) for i in range(_lowercase, 1, -1 ): _lowercase : List[str] = ''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1, start + i ): _lowercase : Dict = '##' + bert_word[j] _lowercase : Optional[int] = start + i _lowercase : Optional[Any] = False break if single_word: start += 1 return bert_word def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Optional[int]: _lowercase : Any = [] for i in range(0, len(_lowercase ), 100 ): _lowercase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0] _lowercase : Dict = [get_chinese_word(_lowercase ) for r in res] ltp_res.extend(_lowercase ) assert len(_lowercase ) == len(_lowercase ) _lowercase : str = [] for i in range(0, len(_lowercase ), 100 ): _lowercase : int = bert_tokenizer(lines[i : i + 100], add_special_tokens=_lowercase, truncation=_lowercase, max_length=512 ) bert_res.extend(res['input_ids'] ) assert len(_lowercase ) == len(_lowercase ) _lowercase : Optional[Any] = [] for input_ids, chinese_word in zip(_lowercase, _lowercase ): _lowercase : Optional[Any] = [] for id in input_ids: _lowercase : str = bert_tokenizer._convert_id_to_token(_lowercase ) input_tokens.append(_lowercase ) _lowercase : Any = add_sub_symbol(_lowercase, _lowercase ) _lowercase : str = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowercase ): if token[:2] == "##": _lowercase : str = token[2:] # save chinese tokens' pos if len(_lowercase ) == 1 and _is_chinese_char(ord(_lowercase ) ): ref_id.append(_lowercase ) ref_ids.append(_lowercase ) assert len(_lowercase ) == len(_lowercase ) return ref_ids def __UpperCamelCase ( _lowercase ) -> Tuple: # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name, 'r', encoding='utf-8' ) as f: _lowercase : int = f.readlines() _lowercase : str = [line.strip() for line in data if len(_lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _lowercase : Union[str, Any] = LTP(args.ltp ) # faster in GPU device _lowercase : Union[str, Any] = BertTokenizer.from_pretrained(args.bert ) _lowercase : Optional[Any] = prepare_ref(_lowercase, _lowercase, _lowercase ) with open(args.save_path, 'w', encoding='utf-8' ) as f: _lowercase : Optional[Any] = [json.dumps(_lowercase ) + '\n' for ref in ref_ids] f.writelines(_lowercase ) if __name__ == "__main__": _A : Any =argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') _A : Optional[int] =parser.parse_args() main(args)
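A quick check of the CJK-range predicate defined at the top of this file, using the readable names (_is_chinese_char, is_chinese) that the file's own internal calls use:

print(_is_chinese_char(ord("一")))  # True  (U+4E00, first CJK Unified Ideograph)
print(_is_chinese_char(ord("a")))   # False (ASCII)
print(is_chinese("身高"))           # 1: every character is in a CJK range
print(is_chinese("180"))            # 0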
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Tuple =['''XLNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =['''XLNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =[ '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLNetForMultipleChoice''', '''XLNetForQuestionAnswering''', '''XLNetForQuestionAnsweringSimple''', '''XLNetForSequenceClassification''', '''XLNetForTokenClassification''', '''XLNetLMHeadModel''', '''XLNetModel''', '''XLNetPreTrainedModel''', '''load_tf_weights_in_xlnet''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =[ '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLNetForMultipleChoice''', '''TFXLNetForQuestionAnsweringSimple''', '''TFXLNetForSequenceClassification''', '''TFXLNetForTokenClassification''', '''TFXLNetLMHeadModel''', '''TFXLNetMainLayer''', '''TFXLNetModel''', '''TFXLNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''


def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be positive.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be positive.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
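# Worked numerical check of the two closed-form formulas above (a sketch;
# the function names follow the rewrite, the originals were stripped by the
# dataset converter):
#
#   dodecahedron_surface_area(2) -> 3 * sqrt(25 + 10*sqrt(5)) * 2**2    = 82.58...
#   dodecahedron_volume(2)       -> (15 + 7*sqrt(5)) / 4 * 2**3         = 61.30...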
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Optional[Any] =logging.get_logger(__name__) _A : Optional[int] ={ '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """markuplm""" def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]: '''simple docstring''' super().__init__( pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : List[Any] = vocab_size _lowercase : Union[str, Any] = hidden_size _lowercase : Dict = num_hidden_layers _lowercase : Optional[Any] = num_attention_heads _lowercase : Dict = hidden_act _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : List[Any] = type_vocab_size _lowercase : Union[str, Any] = initializer_range _lowercase : Optional[int] = layer_norm_eps _lowercase : Optional[Any] = position_embedding_type _lowercase : str = use_cache _lowercase : str = classifier_dropout # additional properties _lowercase : int = max_depth _lowercase : Dict = max_xpath_tag_unit_embeddings _lowercase : str = max_xpath_subs_unit_embeddings _lowercase : List[str] = tag_pad_id _lowercase : Optional[int] = subs_pad_id _lowercase : Any = xpath_unit_hidden_size
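# Usage sketch, assuming the class above is exposed as
# `transformers.MarkupLMConfig` (as in the upstream library):
#
#   from transformers import MarkupLMConfig
#
#   config = MarkupLMConfig()             # defaults mirror microsoft/markuplm-base
#   print(config.max_depth)               # 50 -- maximum XPath depth embedded
#   print(config.xpath_unit_hidden_size)  # 32 -- hidden size per XPath unit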
'''simple docstring'''


def solution(n: int = 1000) -> int:
    """Return the maximum product a * b * c over Pythagorean triplets with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
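# Sanity checks for the function above (sketch):
#
#   solution(12)   -> 60         # the 3-4-5 right triangle, perimeter 12
#   solution(1000) -> 31875000   # a=200, b=375, c=425 (Project Euler #9)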
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : Tuple = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: _lowercase : Tuple = 4 _lowercase : Union[str, Any] = 48 _lowercase : Any = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : Dict = [6, 6, 6, 6] _lowercase : Optional[int] = 60 _lowercase : List[str] = [6, 6, 6, 6] _lowercase : Dict = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : str = 4 _lowercase : str = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: _lowercase : str = 1 _lowercase : Tuple = 1 _lowercase : Dict = 126 _lowercase : Optional[int] = 7 _lowercase : List[Any] = 2_5_5.0 _lowercase : Tuple = '' return config def __UpperCamelCase ( _lowercase, _lowercase ) -> str: if "patch_embed.proj" in name and "layers" not in name: _lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: _lowercase : Tuple = name.replace('layers', 'encoder.stages' ) if "residual_group.blocks" in name: _lowercase : str = name.replace('residual_group.blocks', 'layers' ) if "attn.proj" in name: _lowercase : str = name.replace('attn.proj', 'attention.output.dense' ) if "attn" in name: _lowercase : List[Any] = name.replace('attn', 'attention.self' ) if "norm1" in name: _lowercase : List[str] = name.replace('norm1', 'layernorm_before' ) if "norm2" in name: _lowercase : Tuple = name.replace('norm2', 'layernorm_after' ) if "mlp.fc1" in name: _lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' ) if "mlp.fc2" in name: _lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' ) if "q_bias" in name: _lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' ) if "k_bias" in name: _lowercase : str = name.replace('k_bias', 'key.bias' ) if "v_bias" in name: _lowercase : int = name.replace('v_bias', 'value.bias' ) if "cpb_mlp" in name: _lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' ) if name == "norm.weight": _lowercase : Union[str, Any] = 'layernorm.weight' if name == "norm.bias": _lowercase : List[Any] = 'layernorm.bias' if "conv_first" in name: _lowercase : Tuple = name.replace('conv_first', 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: _lowercase : List[str] = name.replace('conv_last', 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: _lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' ) if "upsample.0" in name: _lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' ) if "upsample.2" in name: _lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' ) 
_lowercase : Optional[int] = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": _lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' ) _lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' ) else: pass else: _lowercase : Tuple = 'swin2sr.' + name return name def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]: for key in orig_state_dict.copy().keys(): _lowercase : int = orig_state_dict.pop(_lowercase ) if "qkv" in key: _lowercase : Tuple = key.split('.' ) _lowercase : Optional[Any] = int(key_split[1] ) _lowercase : Any = int(key_split[4] ) _lowercase : Optional[Any] = config.embed_dim if "weight" in key: _lowercase : Optional[int] = val[:dim, :] _lowercase : int = val[dim : dim * 2, :] _lowercase : int = val[-dim:, :] else: _lowercase : Optional[Any] = val[:dim] _lowercase : Tuple = val[dim : dim * 2] _lowercase : List[str] = val[-dim:] pass else: _lowercase : List[Any] = val return orig_state_dict def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]: _lowercase : Optional[Any] = get_config(_lowercase ) _lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase ) model.eval() _lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' ) _lowercase : Any = convert_state_dict(_lowercase, _lowercase ) _lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase ) if len(_lowercase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_lowercase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'''Unexpected key {key} in state_dict''' ) # verify values _lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' _lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' ) _lowercase : Tuple = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values _lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256 _lowercase : List[str] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) _lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 ) if config.num_channels == 1: _lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) _lowercase : Optional[int] = model(_lowercase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 512, 512] ) _lowercase : Tuple = torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] ) _lowercase : int = torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here _lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] ) _lowercase : Dict = torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : List[str] = torch.Size([1, 3, 512, 512] ) _lowercase : int = torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], 
[-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 1024, 1024] ) _lowercase : Union[str, Any] = torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 ) print('Looks ok!' ) _lowercase : List[str] = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } _lowercase : int = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowercase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_lowercase ) if push_to_hub: model.push_to_hub(f'''caidas/{model_name}''' ) processor.push_to_hub(f'''caidas/{model_name}''' ) if __name__ == "__main__": _A : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') _A : int =parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
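# Example command line for the conversion script above (a sketch; the script
# filename is hypothetical, the URL is the parser's own default):
#
#   python convert_swin2sr_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#       --push_to_hub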
'''simple docstring'''


def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of `input_num` (all divisors except the number itself)."""
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
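# Quick checks (sketch): 6 and 28 are perfect numbers, so the sum of their
# proper divisors equals the number itself:
#
#   sum_of_divisors(6)  -> 6    # 1 + 2 + 3
#   sum_of_divisors(28) -> 28   # 1 + 2 + 4 + 7 + 14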
'''simple docstring'''


def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
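# Example (sketch) with the classic LeetCode-68 input, using the function as
# renamed above:
#
#   text_justification('This is an example of text justification.', 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']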
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCamelCase__ ( A , A , unittest.TestCase ): '''simple docstring''' A_ = IFInpaintingPipeline A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ = PipelineTesterMixin.required_optional_params - {"""latents"""} def __UpperCAmelCase ( self : List[Any] ) -> Any: '''simple docstring''' return self._get_dummy_components() def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : str=0 ) -> Dict: '''simple docstring''' if str(UpperCamelCase_ ).startswith('mps' ): _lowercase : List[str] = torch.manual_seed(UpperCamelCase_ ) else: _lowercase : List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) _lowercase : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) _lowercase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) _lowercase : List[str] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self : Tuple ) -> str: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __UpperCAmelCase ( self : List[str] ) -> List[Any]: '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]: '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def __UpperCAmelCase ( self : int ) -> Any: '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: '''simple docstring''' self._test_save_load_local() def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
'''simple docstring'''

import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune helper directories and hidden/underscore-prefixed directories in place
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
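# Example output (sketch): for a repository containing only
# `maths/prime_check.py`, print_directory_md('.') would emit
#
#   ## Maths
#     * [Prime Check](maths/prime_check.py)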
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever _A : List[str] =logging.getLogger(__name__) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any]=None ) -> Optional[int]: '''simple docstring''' super().__init__( UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , ) _lowercase : Optional[int] = None def __UpperCAmelCase ( self : str , UpperCamelCase_ : int ) -> Any: '''simple docstring''' logger.info('initializing retrieval' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('dist initialized' ) # needs to be set manually _lowercase : int = self._infer_socket_ifname() # avoid clash with the NCCL port _lowercase : Union[str, Any] = str(distributed_port + 1 ) _lowercase : Optional[int] = dist.new_group(ranks=UpperCamelCase_ , backend='gloo' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('dist not initialized / main' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def __UpperCAmelCase ( self : Dict ) -> Dict: '''simple docstring''' return dist.get_rank(group=self.process_group ) == 0 def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]=torch.floataa ) -> Tuple: '''simple docstring''' _lowercase : Union[str, Any] = torch.empty(UpperCamelCase_ , dtype=UpperCamelCase_ ) dist.scatter(UpperCamelCase_ , src=0 , scatter_list=UpperCamelCase_ , group=self.process_group ) return target_tensor def __UpperCAmelCase ( self : str ) -> Optional[int]: '''simple docstring''' _lowercase : Union[str, Any] = psutil.net_if_addrs() # a hacky way to deal with varying network interface names _lowercase : List[str] = next((addr for addr in addrs if addr.startswith('e' )) , UpperCamelCase_ ) return ifname def __UpperCAmelCase ( self : str , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> Tuple[np.ndarray, List[dict]]: '''simple docstring''' if not dist.is_initialized(): _lowercase , _lowercase : Optional[Any] = self._main_retrieve(UpperCamelCase_ , UpperCamelCase_ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase_ ) # distributed training _lowercase : int = dist.get_world_size(group=self.process_group ) # gather logic _lowercase : Any = None if self._is_main(): _lowercase : Optional[int] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCamelCase_ )] dist.gather(torch.tensor(UpperCamelCase_ ) , dst=0 , gather_list=UpperCamelCase_ , group=self.process_group ) # scatter logic _lowercase : Optional[Any] = question_hidden_states.shape[0] _lowercase : Optional[Any] = [] _lowercase : int = [] if self._is_main(): assert len(UpperCamelCase_ ) == world_size _lowercase , _lowercase : Any = self._main_retrieve(torch.cat(UpperCamelCase_ ).numpy() , UpperCamelCase_ ) _lowercase , _lowercase : int = 
torch.tensor(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ ) _lowercase : str = self._chunk_tensor(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : int = self._chunk_tensor(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : List[Any] = self._scattered(UpperCamelCase_ , [n_queries, n_docs] , target_type=torch.intaa ) _lowercase : List[Any] = self._scattered(UpperCamelCase_ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCamelCase_ )
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Dict =['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Union[str, Any] =['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =[ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys _A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase__ ( A ): '''simple docstring''' A_ = (DDIMParallelScheduler,) A_ = (("""eta""", 0.0), ("""num_inference_steps""", 50)) def __UpperCAmelCase ( self : Any , **UpperCamelCase_ : Dict ) -> Union[str, Any]: '''simple docstring''' _lowercase : Tuple = { 'num_train_timesteps': 1000, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**UpperCamelCase_ ) return config def __UpperCAmelCase ( self : List[str] , **UpperCamelCase_ : Optional[Any] ) -> Dict: '''simple docstring''' _lowercase : List[str] = self.scheduler_classes[0] _lowercase : List[str] = self.get_scheduler_config(**UpperCamelCase_ ) _lowercase : Optional[Any] = scheduler_class(**UpperCamelCase_ ) _lowercase , _lowercase : Dict = 10, 0.0 _lowercase : Any = self.dummy_model() _lowercase : Any = self.dummy_sample_deter scheduler.set_timesteps(UpperCamelCase_ ) for t in scheduler.timesteps: _lowercase : List[Any] = model(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample return sample def __UpperCAmelCase ( self : str ) -> List[str]: '''simple docstring''' for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] ) -> Any: '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=UpperCamelCase_ ) _lowercase : List[str] = self.scheduler_classes[0] _lowercase : List[Any] = self.get_scheduler_config(steps_offset=1 ) _lowercase : str = scheduler_class(**UpperCamelCase_ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def __UpperCAmelCase ( self : Tuple ) -> Any: '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> Tuple: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] ) -> List[str]: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict ) -> List[Any]: '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=UpperCamelCase_ ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' self.check_over_configs(thresholding=UpperCamelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' for t in [1, 10, 49]: 
self.check_over_forward(time_step=UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> int: '''simple docstring''' for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): self.check_over_forward(time_step=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ ) def __UpperCAmelCase ( self : Union[str, Any] ) -> str: '''simple docstring''' for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=UpperCamelCase_ , eta=UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> int: '''simple docstring''' _lowercase : Optional[Any] = self.scheduler_classes[0] _lowercase : List[Any] = self.get_scheduler_config() _lowercase : str = scheduler_class(**UpperCamelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5 def __UpperCAmelCase ( self : int ) -> Dict: '''simple docstring''' _lowercase : Any = self.scheduler_classes[0] _lowercase : Optional[Any] = self.get_scheduler_config() _lowercase : List[Any] = scheduler_class(**UpperCamelCase_ ) _lowercase , _lowercase : Tuple = 10, 0.0 scheduler.set_timesteps(UpperCamelCase_ ) _lowercase : List[str] = self.dummy_model() _lowercase : List[str] = self.dummy_sample_deter _lowercase : Optional[Any] = self.dummy_sample_deter + 0.1 _lowercase : str = self.dummy_sample_deter - 0.1 _lowercase : str = samplea.shape[0] _lowercase : Tuple = torch.stack([samplea, samplea, samplea] , dim=0 ) _lowercase : str = torch.arange(UpperCamelCase_ )[0:3, None].repeat(1 , UpperCamelCase_ ) _lowercase : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _lowercase : List[str] = scheduler.batch_step_no_noise(UpperCamelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCamelCase_ ) _lowercase : Dict = torch.sum(torch.abs(UpperCamelCase_ ) ) _lowercase : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2 assert abs(result_mean.item() - 0.49_82 ) < 1E-3 def __UpperCAmelCase ( self : List[str] ) -> Tuple: '''simple docstring''' _lowercase : Any = self.full_loop() _lowercase : Optional[Any] = torch.sum(torch.abs(UpperCamelCase_ ) ) _lowercase : Tuple = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3 def __UpperCAmelCase ( self : List[str] ) -> List[str]: '''simple docstring''' _lowercase : int = self.full_loop(prediction_type='v_prediction' ) _lowercase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) ) _lowercase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 52.53_02 ) < 1E-2 assert abs(result_mean.item() - 0.06_84 ) < 1E-3 def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]: '''simple docstring''' _lowercase : List[Any] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 ) _lowercase : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase_ ) ) _lowercase : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2 assert abs(result_mean.item() - 0.19_51 ) < 1E-3 def __UpperCAmelCase ( self : 
List[Any] ) -> str: '''simple docstring''' _lowercase : Optional[int] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 ) _lowercase : str = torch.sum(torch.abs(UpperCamelCase_ ) ) _lowercase : Tuple = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2 assert abs(result_mean.item() - 0.19_41 ) < 1E-3
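# Minimal standalone sketch of the scheduler exercised by the tests above
# (diffusers' DDIMParallelScheduler; the toy tensor shapes are arbitrary
# stand-ins, not taken from the source):
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule='linear')
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.randn(1, 3, 8, 8)  # stand-in for a denoising model's output
    sample = scheduler.step(noise_pred, t, sample, eta=0.0).prev_sample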
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple: '''simple docstring''' _lowercase : int = parent _lowercase : str = batch_size _lowercase : List[str] = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_attention_mask _lowercase : List[Any] = use_token_type_ids _lowercase : Union[str, Any] = use_labels _lowercase : Dict = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : int = num_attention_heads _lowercase : Optional[int] = intermediate_size _lowercase : Any = hidden_act _lowercase : List[str] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Optional[int] = max_position_embeddings _lowercase : int = type_vocab_size _lowercase : Any = type_sequence_label_size _lowercase : Any = initializer_range _lowercase : str = num_choices def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : int = None if self.use_attention_mask: _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Any = None if self.use_token_type_ids: _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' _lowercase : Dict = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs _lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax 
class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' A_ = True A_ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Tuple = FlaxRoFormerModelTester(self ) @slow def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: _lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ ) _lowercase : str = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase_ ) @require_flax class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : List[str] ) -> List[Any]: '''simple docstring''' _lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) _lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] ) _lowercase : int = model(UpperCamelCase_ )[0] _lowercase : Union[str, Any] = 5_0000 _lowercase : str = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCamelCase_ ) _lowercase : int = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
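# Inference sketch mirroring the slow integration test above (the checkpoint
# name and expected vocabulary size are taken directly from that test):
import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
logits = model(input_ids)[0]  # expected shape: (1, 6, 50000)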
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase__ : '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any=13 , UpperCamelCase_ : int=32 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Optional[int]=[10, 20, 30, 40] , UpperCamelCase_ : Optional[Any]=[2, 2, 3, 2] , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : List[Any]=10 , UpperCamelCase_ : Dict=0.02 , UpperCamelCase_ : int=["stage2", "stage3", "stage4"] , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : List[str]=None , ) -> Optional[int]: '''simple docstring''' _lowercase : Union[str, Any] = parent _lowercase : Optional[int] = batch_size _lowercase : List[Any] = image_size _lowercase : Union[str, Any] = num_channels _lowercase : List[Any] = num_stages _lowercase : Optional[int] = hidden_sizes _lowercase : int = depths _lowercase : Optional[int] = is_training _lowercase : Any = use_labels _lowercase : Any = intermediate_size _lowercase : Union[str, Any] = hidden_act _lowercase : Tuple = type_sequence_label_size _lowercase : Union[str, Any] = initializer_range _lowercase : Tuple = out_features _lowercase : Tuple = num_labels _lowercase : Any = scope _lowercase : int = num_stages def __UpperCAmelCase ( self : int ) -> str: '''simple docstring''' _lowercase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : List[str] = None if self.use_labels: _lowercase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : List[Any] = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self : int ) -> str: '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCamelCase_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCamelCase_ , loss_ignore_index=255 , num_labels=self.num_labels , ) def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = 
UperNetForSemanticSegmentation(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() _lowercase : int = model(UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __UpperCAmelCase ( self : Union[str, Any] ) -> int: '''simple docstring''' _lowercase : Tuple = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : str = config_and_inputs _lowercase : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( A , A , unittest.TestCase ): '''simple docstring''' A_ = (UperNetForSemanticSegmentation,) if is_torch_available() else () A_ = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {} A_ = False A_ = False A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self : str ) -> Any: '''simple docstring''' _lowercase : Tuple = UperNetModelTester(self ) _lowercase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCAmelCase ( self : Tuple ) -> Any: '''simple docstring''' return def __UpperCAmelCase ( self : List[str] ) -> Any: '''simple docstring''' _lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Optional[Any] = model_class(UpperCamelCase_ ) _lowercase : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : List[Any] = [*signature.parameters.keys()] _lowercase : int = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict ) -> Dict: '''simple docstring''' _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ ) @unittest.skip(reason='UperNet does not use inputs_embeds' ) def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' pass @unittest.skip(reason='UperNet does not support input and output embeddings' ) def __UpperCAmelCase ( self : List[str] ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason='UperNet does not have a base model' ) def __UpperCAmelCase ( self : Union[str, Any] ) -> int: '''simple docstring''' pass @unittest.skip(reason='UperNet does not have a base model' ) def __UpperCAmelCase ( self : Optional[Any] ) -> Dict: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' pass def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' def check_hidden_states_output(UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Dict ): _lowercase : Optional[int] = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): _lowercase : str = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) _lowercase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowercase : Tuple = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : int = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowercase : List[Any] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> List[Any]: '''simple docstring''' _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : List[str] = _config_zero_init(UpperCamelCase_ ) _lowercase : Any = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _lowercase : str = model_class(config=UpperCamelCase_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip(reason='UperNet does not have tied weights' ) def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' pass @slow def __UpperCAmelCase ( self : List[str] ) -> List[str]: '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Any = UperNetForSemanticSegmentation.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def __UpperCamelCase ( ) -> int: _lowercase : Dict = hf_hub_download( repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg' ) _lowercase : Optional[int] = Image.open(_lowercase ).convert('RGB' ) return image @require_torch @require_vision @slow class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' ) _lowercase : List[str] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(UpperCamelCase_ ) _lowercase : Optional[Any] = prepare_img() _lowercase : Optional[int] = processor(images=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ ) with torch.no_grad(): _lowercase : List[str] = model(**UpperCamelCase_ ) _lowercase : Dict = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) _lowercase : Optional[Any] = torch.tensor( [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, 
-7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) ) def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : str = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' ) _lowercase : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(UpperCamelCase_ ) _lowercase : Tuple = prepare_img() _lowercase : List[Any] = processor(images=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ ) with torch.no_grad(): _lowercase : Optional[int] = model(**UpperCamelCase_ ) _lowercase : Optional[Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) _lowercase : int = torch.tensor( [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
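# End-to-end sketch mirroring the integration tests above (the local image
# path is hypothetical; the tests download ADE_val_00000001.jpg from the hub):
import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny')
image = Image.open('ADE_val_00000001.jpg').convert('RGB')
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_labels, 512, 512)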
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _A : Optional[int] =logging.get_logger(__name__) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = ["""input_features""", """is_longer"""] def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict: '''simple docstring''' super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : Tuple = top_db _lowercase : Any = truncation _lowercase : str = padding _lowercase : int = fft_window_size _lowercase : Any = (fft_window_size >> 1) + 1 _lowercase : int = hop_length _lowercase : Any = max_length_s _lowercase : str = max_length_s * sampling_rate _lowercase : Any = sampling_rate _lowercase : List[Any] = frequency_min _lowercase : Tuple = frequency_max _lowercase : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , ) _lowercase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , ) def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]: '''simple docstring''' _lowercase : Tuple = copy.deepcopy(self.__dict__ ) _lowercase : int = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray: '''simple docstring''' _lowercase : List[str] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , ) return log_mel_spectrogram.T def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk _lowercase : int = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk _lowercase : Union[str, Any] = [0] # randomly choose index for each part _lowercase : Tuple = np.random.choice(ranges[0] ) _lowercase : int = np.random.choice(ranges[1] ) _lowercase : Any = np.random.choice(ranges[2] ) _lowercase : int = mel[idx_front : idx_front + chunk_frames, :] _lowercase : int = 
mel[idx_middle : idx_middle + chunk_frames, :] _lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :] _lowercase : List[Any] = torch.tensor(mel[None, None, :] ) _lowercase : Optional[int] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ ) _lowercase : str = mel_shrink[0][0].numpy() _lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array: '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": _lowercase : Tuple = True # random crop to max_length (for compatibility) -> this should be handled by self.pad _lowercase : Any = len(UpperCamelCase_ ) - max_length _lowercase : Dict = np.random.randint(0 , overflow + 1 ) _lowercase : Optional[int] = waveform[idx : idx + max_length] _lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) _lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed _lowercase : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. _lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 ) _lowercase : List[Any] = False else: _lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : int = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: _lowercase : Any = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": _lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) ) _lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": _lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) ) _lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) ) _lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": _lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) _lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature: '''simple docstring''' _lowercase : Dict = truncation if truncation is not None else self.truncation _lowercase : int = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _lowercase : List[str] = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ): _lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa ) elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowercase : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowercase : int = [np.asarray(UpperCamelCase_ )] # convert to mel spectrogram, truncate and pad if needed. 
_lowercase : Optional[Any] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ ) for waveform in raw_speech ] _lowercase : List[Any] = [] _lowercase : Dict = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_ ) is_longer.append(UpperCamelCase_ ) if truncation == "fusion" and sum(UpperCamelCase_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer _lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) ) _lowercase : str = True if isinstance(input_mel[0] , UpperCamelCase_ ): _lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool _lowercase : Tuple = [[longer] for longer in is_longer] _lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer} _lowercase : Optional[int] = BatchFeature(UpperCamelCase_ ) if return_tensors is not None: _lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ ) return input_features
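# --- Hedged usage sketch for the feature extractor above. The public class
# name `ClapFeatureExtractor` and the output shapes are assumptions based on
# the CLAP-style API this snippet mirrors (48 kHz audio, 64 mel bins, 4-way
# "fusion" stacking); adjust if the concrete class differs.
import numpy as np
from transformers import ClapFeatureExtractor

feature_extractor = ClapFeatureExtractor()                 # defaults match the __init__ above
waveform = np.random.randn(3 * 48_000).astype(np.float32)  # 3 s of fake mono audio
features = feature_extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape)  # (1, 4, num_frames, 64) in fusion mode
print(features["is_longer"])             # [[False]] for clips shorter than max_length_s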
4
1
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase__ : '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any]=13 , UpperCamelCase_ : Dict=30 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Any=32 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : Any=37 , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Union[str, Any]=10 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : Any=None , ) -> str: '''simple docstring''' _lowercase : Optional[int] = parent _lowercase : List[str] = batch_size _lowercase : Dict = image_size _lowercase : Optional[int] = patch_size _lowercase : List[str] = num_channels _lowercase : Optional[Any] = is_training _lowercase : Optional[Any] = use_labels _lowercase : Any = hidden_size _lowercase : Dict = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : int = intermediate_size _lowercase : str = hidden_act _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Optional[int] = attention_probs_dropout_prob _lowercase : Any = type_sequence_label_size _lowercase : Dict = initializer_range _lowercase : List[Any] = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _lowercase : int = (image_size // patch_size) ** 2 _lowercase : int = num_patches + 1 def __UpperCAmelCase ( self : Any ) -> Tuple: '''simple docstring''' _lowercase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : List[Any] = None if self.use_labels: _lowercase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : Any = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self : Tuple ) -> int: '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] ) -> List[str]: '''simple docstring''' _lowercase : List[str] = TFViTModel(config=UpperCamelCase_ ) _lowercase : Optional[Any] = model(UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) # Test with an image with different size than the one specified in config. _lowercase : Union[str, Any] = self.image_size // 2 _lowercase : List[str] = pixel_values[:, :, :image_size, :image_size] _lowercase : str = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ ) _lowercase : Dict = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] ) -> str: '''simple docstring''' _lowercase : Tuple = self.type_sequence_label_size _lowercase : int = TFViTForImageClassification(UpperCamelCase_ ) _lowercase : Dict = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. _lowercase : List[Any] = self.image_size // 2 _lowercase : Any = pixel_values[:, :, :image_size, :image_size] _lowercase : Optional[Any] = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowercase : Dict = 1 _lowercase : Union[str, Any] = TFViTForImageClassification(UpperCamelCase_ ) _lowercase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowercase : Optional[int] = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCAmelCase ( self : Union[str, Any] ) -> int: '''simple docstring''' _lowercase : Optional[Any] = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase : int = config_and_inputs _lowercase : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): '''simple docstring''' A_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () A_ = ( {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification} if is_tf_available() else {} ) A_ = False A_ = False A_ = False def __UpperCAmelCase ( self : List[Any] ) -> Tuple: '''simple docstring''' _lowercase : str = TFViTModelTester(self ) _lowercase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def __UpperCAmelCase ( self : Dict ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict: '''simple docstring''' pass @unittest.skip(reason='ViT does not use inputs_embeds' ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Any: '''simple docstring''' pass def __UpperCAmelCase ( self : List[str] ) -> str: '''simple docstring''' _lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Optional[int] = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _lowercase : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) ) def __UpperCAmelCase ( self : List[Any] ) -> Any: '''simple docstring''' _lowercase , 
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Optional[Any] = model_class(UpperCamelCase_ ) _lowercase : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : Any = [*signature.parameters.keys()] _lowercase : Tuple = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def __UpperCAmelCase ( self : str ) -> Optional[Any]: '''simple docstring''' _lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = TFViTModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(UpperCamelCase_ ) def __UpperCamelCase ( ) -> int: _lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: '''simple docstring''' return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def __UpperCAmelCase ( self : Optional[Any] ) -> str: '''simple docstring''' _lowercase : Optional[Any] = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ) _lowercase : List[Any] = self.default_image_processor _lowercase : Dict = prepare_img() _lowercase : int = image_processor(images=UpperCamelCase_ , return_tensors='tf' ) # forward pass _lowercase : Optional[int] = model(**UpperCamelCase_ ) # verify the logits _lowercase : Optional[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) _lowercase : str = tf.constant([-0.27_44, 0.82_15, -0.08_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 )
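# --- The tester above leans on one piece of arithmetic: a ViT sequence is one
# token per non-overlapping patch plus the [CLS] token. A minimal,
# dependency-free restatement:
def vit_seq_length(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token

assert vit_seq_length(30, 2) == 226    # the tester's defaults: 15 * 15 patches + CLS
assert vit_seq_length(224, 16) == 197  # the google/vit-base-patch16-224 geometry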
4
'''simple docstring''' from __future__ import annotations import requests def __UpperCamelCase ( _lowercase ) -> dict: _lowercase : Optional[int] = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty''' return requests.get(_lowercase ).json() def __UpperCamelCase ( _lowercase = 10 ) -> list[dict]: _lowercase : Union[str, Any] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' _lowercase : Optional[Any] = requests.get(_lowercase ).json()[:max_stories] return [get_hackernews_story(_lowercase ) for story_id in story_ids] def __UpperCamelCase ( _lowercase = 10 ) -> str: _lowercase : Tuple = hackernews_top_stories(_lowercase ) return "\n".join('* [{title}]({url})'.format(**_lowercase ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
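# --- The renaming in this copy collapses all three helpers onto
# `__UpperCamelCase`, so they shadow one another as written. A standalone,
# runnable restatement of the same idea against the same endpoints (network
# access required at run time):
import requests

def top_story_titles(n: int = 5) -> list[str]:
    base = "https://hacker-news.firebaseio.com/v0"
    story_ids = requests.get(f"{base}/topstories.json?print=pretty").json()[:n]
    stories = [requests.get(f"{base}/item/{i}.json?print=pretty").json() for i in story_ids]
    return [story["title"] for story in stories]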
4
1
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class lowerCamelCase__ : '''simple docstring''' def __UpperCAmelCase ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> List[str]: '''simple docstring''' return None class lowerCamelCase__ : '''simple docstring''' def __UpperCAmelCase ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' return None class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' A_ = [ # (model_name, model_kwargs) ("""bert-base-cased""", {}), ("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(UpperCamelCase_ , 'tf' , 12 , **UpperCamelCase_ ) @require_torch @slow def __UpperCAmelCase ( self : Any ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(UpperCamelCase_ , 'pt' , 12 , **UpperCamelCase_ ) @require_torch @slow def __UpperCAmelCase ( self : Optional[Any] ) -> Any: '''simple docstring''' from transformers import BertModel _lowercase : Optional[Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(UpperCamelCase_ ) ) vocab_file.flush() _lowercase : Optional[Any] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: _lowercase : Any = BertModel(BertConfig(vocab_size=len(UpperCamelCase_ ) ) ) model.save_pretrained(UpperCamelCase_ ) self._test_export(UpperCamelCase_ , 'pt' , 12 , UpperCamelCase_ ) @require_tf @slow def __UpperCAmelCase ( self : int ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: _lowercase : Dict = self._test_export(UpperCamelCase_ , 'tf' , 12 , **UpperCamelCase_ ) _lowercase : str = quantize(Path(UpperCamelCase_ ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(UpperCamelCase_ ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def __UpperCAmelCase ( self : Dict ) -> int: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: _lowercase : int = self._test_export(UpperCamelCase_ , 'pt' , 12 , **UpperCamelCase_ ) _lowercase : List[Any] = quantize(UpperCamelCase_ ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(UpperCamelCase_ ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Any=None , **UpperCamelCase_ : Optional[Any] ) -> List[Any]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: _lowercase : int = Path(UpperCamelCase_ ).joinpath('model.onnx' ) # 
Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) return path except Exception as e: self.fail(UpperCamelCase_ ) @require_torch @require_tokenizers @slow def __UpperCAmelCase ( self : Any ) -> Tuple: '''simple docstring''' from transformers import BertModel _lowercase : List[str] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) _lowercase : List[str] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(UpperCamelCase_ , UpperCamelCase_ , 'pt' ) @require_tf @require_tokenizers @slow def __UpperCAmelCase ( self : str ) -> List[str]: '''simple docstring''' from transformers import TFBertModel _lowercase : Any = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) _lowercase : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(UpperCamelCase_ , UpperCamelCase_ , 'tf' ) def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Any ) -> List[Any]: '''simple docstring''' _lowercase : str = FeatureExtractionPipeline(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : str = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] _lowercase , _lowercase , _lowercase , _lowercase : Dict = infer_shapes(UpperCamelCase_ , UpperCamelCase_ ) # Assert all variables are present self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , UpperCamelCase_ ) self.assertSequenceEqual(variable_names[3:] , UpperCamelCase_ ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] , {0: 'batch'} ) def __UpperCAmelCase ( self : int ) -> int: '''simple docstring''' _lowercase : Tuple = ['input_ids', 'attention_mask', 'token_type_ids'] _lowercase : str = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} _lowercase , _lowercase : str = ensure_valid_input(FuncContiguousArgs() , UpperCamelCase_ , UpperCamelCase_ ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(UpperCamelCase_ ) , 3 ) # Should have exactly the same input names self.assertEqual(set(UpperCamelCase_ ) , set(UpperCamelCase_ ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(UpperCamelCase_ , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) _lowercase , _lowercase : str = ensure_valid_input(FuncNonContiguousArgs() , UpperCamelCase_ , UpperCamelCase_ ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(UpperCamelCase_ ) , 1 ) self.assertEqual(len(UpperCamelCase_ ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] , 'input_ids' ) def __UpperCAmelCase ( self : 
Optional[Any] ) -> List[Any]: '''simple docstring''' _lowercase : Optional[Any] = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
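# --- Hedged sketch of the export path these tests exercise, via the
# `transformers.convert_graph_to_onnx.convert` helper imported above. The
# checkpoint and opset are the values the tests use; the output path and the
# exact keyword names are assumptions, so check them against your version.
from pathlib import Path
from transformers.convert_graph_to_onnx import convert

convert(
    framework="pt",  # the tests also exercise "tf"
    model="bert-base-cased",
    output=Path("onnx/bert-base-cased.onnx"),
    opset=12,
)
# quantize(Path("onnx/bert-base-cased.onnx")) then returns the path of a
# smaller int8 copy, which is what the size assertions above check.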
4
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Dict =logging.get_logger(__name__) _A : Dict ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """megatron-bert""" def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Dict = vocab_size _lowercase : Any = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Dict = num_attention_heads _lowercase : Dict = hidden_act _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : Optional[Any] = initializer_range _lowercase : List[str] = layer_norm_eps _lowercase : List[Any] = position_embedding_type _lowercase : Optional[Any] = use_cache
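# --- Usage sketch: this config mirrors `MegatronBertConfig` from
# transformers, so the defaults above apply unless overridden per instance.
from transformers import MegatronBertConfig

config = MegatronBertConfig()  # vocab_size=29056, hidden_size=1024, 24 layers, 16 heads
tiny = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
print(config.model_type, tiny.hidden_size)  # megatron-bert 128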
4
1
'''simple docstring''' def __UpperCamelCase ( _lowercase, _lowercase ) -> list: _lowercase : List[str] = word.split() def justify(_lowercase, _lowercase, _lowercase ) -> str: _lowercase : Dict = max_width - width _lowercase : Tuple = len(_lowercase ) if len(_lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _lowercase : Tuple = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _lowercase : str = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _lowercase : Optional[int] = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_lowercase ): num_spaces_between_words_list[i] += 1 _lowercase : Union[str, Any] = [] for i in range(_lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ' ' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_lowercase ) _lowercase : str = [] _lowercase : list[str] = [] _lowercase : Union[str, Any] = 0 for word in words: if width + len(_lowercase ) + len(_lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_lowercase ) width += len(_lowercase ) else: # justify the line and add it to result answer.append(justify(_lowercase, _lowercase, _lowercase ) ) # reset new line and new width _lowercase , _lowercase : Optional[Any] = [word], len(_lowercase ) _lowercase : Optional[int] = max_width - width - len(_lowercase ) answer.append(' '.join(_lowercase ) + (remaining_spaces + 1) * ' ' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
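# --- Doctest-style usage sketch for the routine above, assuming its original
# signature `text_justification(word: str, max_width: int) -> list` (the
# renaming in this copy hides it). Every returned line is exactly `max_width`
# characters wide, with surplus spaces handed out left-first:
# >>> text_justification("This is an example of text justification.", 16)
# ['This    is    an', 'example  of text', 'justification.  ']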
4
'''simple docstring''' import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def __UpperCamelCase ( _lowercase ) -> List[Any]: _lowercase : Tuple = args.pruning_method _lowercase : int = args.threshold _lowercase : str = args.model_name_or_path.rstrip('/' ) _lowercase : Dict = args.target_model_path print(f'''Load fine-pruned model from {model_name_or_path}''' ) _lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) ) _lowercase : List[Any] = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _lowercase : Optional[int] = tensor print(f'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: _lowercase : List[str] = tensor print(f'''Copied layer {name}''' ) elif "bias" in name: _lowercase : Dict = tensor print(f'''Copied layer {name}''' ) else: if pruning_method == "magnitude": _lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase ) _lowercase : Optional[Any] = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue _lowercase : Optional[Any] = name[:-6] _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores'''] _lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase ) _lowercase : str = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _lowercase : str = name[:-6] _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores'''] _lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase ) _lowercase : Optional[int] = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue _lowercase : Optional[int] = name[:-6] _lowercase : List[str] = model[f'''{prefix_}mask_scores'''] _lowercase , _lowercase : Union[str, Any] = -0.1, 1.1 _lowercase : str = torch.sigmoid(_lowercase ) _lowercase : int = s * (r - l) + l _lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 ) _lowercase : Union[str, Any] = tensor * mask print(f'''Pruned layer {name}''' ) else: raise ValueError('Unknown pruning method' ) if target_model_path is None: _lowercase : List[Any] = os.path.join( os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' ) if not os.path.isdir(_lowercase ): shutil.copytree(_lowercase, _lowercase ) print(f'''\nCreated folder {target_model_path}''' ) torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) ) print('\nPruned model saved! See you later!' 
) if __name__ == "__main__": _A : Union[str, Any] =argparse.ArgumentParser() parser.add_argument( '''--pruning_method''', choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''], type=str, required=True, help=( '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,''' ''' sigmoied_threshold = Soft movement pruning)''' ), ) parser.add_argument( '''--threshold''', type=float, required=False, help=( '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.''' '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.''' '''Not needed for `l0`''' ), ) parser.add_argument( '''--model_name_or_path''', type=str, required=True, help='''Folder containing the model that was previously fine-pruned''', ) parser.add_argument( '''--target_model_path''', default=None, type=str, required=False, help='''Folder containing the model that was previously fine-pruned''', ) _A : List[Any] =parser.parse_args() main(args)
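# --- Minimal sketch of the topK binarization the script applies per layer:
# keep the weights whose learned scores fall in the top `threshold` fraction
# and zero out the rest. This re-implements the idea in plain torch instead
# of emmental's `TopKBinarizer`, so treat it as illustrative only.
import torch

def topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    k = max(1, int(threshold * scores.numel()))
    # value of the k-th largest score = (numel - k + 1)-th smallest
    cutoff = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= cutoff).to(scores.dtype)

scores = torch.randn(4, 4)
mask = topk_mask(scores, threshold=0.25)  # keep the top 25% of entries
pruned_weight = torch.randn(4, 4) * mask
print(int(mask.sum()))  # 4 of the 16 weights survive (barring ties)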
4
1
'''simple docstring''' def __UpperCamelCase ( _lowercase, _lowercase ) -> int: _enforce_args(_lowercase, _lowercase ) if n == 0: return 0 _lowercase : List[Any] = float('-inf' ) for i in range(1, n + 1 ): _lowercase : int = max( _lowercase, prices[i - 1] + naive_cut_rod_recursive(n - i, _lowercase ) ) return max_revue def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]: _enforce_args(_lowercase, _lowercase ) _lowercase : int = [float('-inf' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(_lowercase, _lowercase, _lowercase ) def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> List[str]: if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _lowercase : int = float('-inf' ) for i in range(1, n + 1 ): _lowercase : List[str] = max( _lowercase, prices[i - 1] + _top_down_cut_rod_recursive(n - i, _lowercase, _lowercase ), ) _lowercase : List[str] = max_revenue return max_rev[n] def __UpperCamelCase ( _lowercase, _lowercase ) -> str: _enforce_args(_lowercase, _lowercase ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _lowercase : Any = [float('-inf' ) for _ in range(n + 1 )] _lowercase : List[str] = 0 for i in range(1, n + 1 ): _lowercase : Tuple = max_rev[i] for j in range(1, i + 1 ): _lowercase : Optional[int] = max(_lowercase, prices[j - 1] + max_rev[i - j] ) _lowercase : Union[str, Any] = max_revenue_i return max_rev[n] def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]: if n < 0: _lowercase : int = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(_lowercase ) if n > len(_lowercase ): _lowercase : Any = ( 'Each integral piece of rod must have a corresponding price. ' f'''Got n = {n} but length of prices = {len(_lowercase )}''' ) raise ValueError(_lowercase ) def __UpperCamelCase ( ) -> List[str]: _lowercase : Optional[Any] = [6, 10, 12, 15, 20, 23] _lowercase : Optional[int] = len(_lowercase ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _lowercase : int = 36 _lowercase : Optional[int] = top_down_cut_rod(_lowercase, _lowercase ) _lowercase : List[Any] = bottom_up_cut_rod(_lowercase, _lowercase ) _lowercase : List[str] = naive_cut_rod_recursive(_lowercase, _lowercase ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
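# --- Standalone, runnable restatement of the bottom-up strategy above,
# validated on the snippet's own price table (best revenue 36 = six
# unit-length pieces at price 6 each):
def bottom_up_cut_rod(prices: list[int], n: int) -> int:
    max_rev = [0] * (n + 1)
    for length in range(1, n + 1):
        max_rev[length] = max(
            prices[piece - 1] + max_rev[length - piece] for piece in range(1, length + 1)
        )
    return max_rev[n]

assert bottom_up_cut_rod([6, 10, 12, 15, 20, 23], 6) == 36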
4
'''simple docstring''' _A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/''' def __UpperCamelCase ( _lowercase ) -> bytes: # Make sure the supplied data is a bytes-like object if not isinstance(_lowercase, _lowercase ): _lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_lowercase ) _lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data ) _lowercase : Dict = len(_lowercase ) % 6 != 0 if padding_needed: # The padding that will be added later _lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_lowercase ) % 6) else: _lowercase : Optional[int] = B'' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6], 2 )] for index in range(0, len(_lowercase ), 6 ) ).encode() + padding ) def __UpperCamelCase ( _lowercase ) -> bytes: # Make sure encoded_data is either a string or a bytes-like object if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ): _lowercase : int = ( 'argument should be a bytes-like object or ASCII string, ' f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_lowercase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_lowercase, _lowercase ): try: _lowercase : Optional[int] = encoded_data.decode('utf-8' ) except UnicodeDecodeError: raise ValueError('base64 encoded data should only contain ASCII characters' ) _lowercase : Optional[int] = encoded_data.count('=' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one _lowercase : str = encoded_data[:-padding] _lowercase : Tuple = ''.join( bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: _lowercase : Union[str, Any] = ''.join( bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data ) _lowercase : List[str] = [ int(binary_stream[index : index + 8], 2 ) for index in range(0, len(_lowercase ), 8 ) ] return bytes(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
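# --- Round-trip reference check via the standard library; once the original
# names (`base64_encode`, `base64_decode`) are restored, the hand-rolled
# codec above should agree with these values byte for byte.
import base64

data = b"Hello, World!"
encoded = base64.b64encode(data)
assert encoded == b"SGVsbG8sIFdvcmxkIQ=="
assert base64.b64decode(encoded) == data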
4
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : int ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = [[1, 2, 4], [1, 2, 3, 4]] _lowercase : Tuple = DisjunctiveConstraint(UpperCamelCase_ ) self.assertTrue(isinstance(dc.token_ids , UpperCamelCase_ ) ) with self.assertRaises(UpperCamelCase_ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(UpperCamelCase_ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : Union[str, Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(UpperCamelCase_ ): DisjunctiveConstraint(UpperCamelCase_ ) # fails here def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: '''simple docstring''' _lowercase : List[Any] = [[1, 2, 3], [1, 2, 4]] _lowercase : Optional[int] = DisjunctiveConstraint(UpperCamelCase_ ) _lowercase , _lowercase , _lowercase : Any = dc.update(1 ) _lowercase : Dict = stepped is True and completed is False and reset is False self.assertTrue(UpperCamelCase_ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) _lowercase , _lowercase , _lowercase : Dict = dc.update(2 ) _lowercase : int = stepped is True and completed is False and reset is False self.assertTrue(UpperCamelCase_ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) _lowercase , _lowercase , _lowercase : Optional[int] = dc.update(3 ) _lowercase : str = stepped is True and completed is True and reset is False self.assertTrue(UpperCamelCase_ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def __UpperCAmelCase ( self : str ) -> List[Any]: '''simple docstring''' _lowercase : str = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] _lowercase : Union[str, Any] = DisjunctiveConstraint(UpperCamelCase_ ) _lowercase , _lowercase , _lowercase : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) _lowercase , _lowercase , _lowercase : List[str] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) _lowercase , _lowercase , _lowercase : int = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) _lowercase , _lowercase , _lowercase : List[Any] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() _lowercase , _lowercase , _lowercase : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) _lowercase , _lowercase , _lowercase : Optional[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) _lowercase , _lowercase , _lowercase : Any = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
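# --- Compact restatement of what the tests above verify: a
# DisjunctiveConstraint is satisfied as soon as generation matches any one of
# its candidate token sequences.
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
print(dc.completed, dc.current_seq)  # True [1, 2, 4]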
4
'''simple docstring''' def __UpperCamelCase ( _lowercase ) -> bool: return str(_lowercase ) == str(_lowercase )[::-1] def __UpperCamelCase ( _lowercase ) -> int: return int(_lowercase ) + int(str(_lowercase )[::-1] ) def __UpperCamelCase ( _lowercase = 1_0000 ) -> int: _lowercase : List[str] = [] for num in range(1, _lowercase ): _lowercase : Tuple = 0 _lowercase : Tuple = num while iterations < 50: _lowercase : Union[str, Any] = sum_reverse(_lowercase ) iterations += 1 if is_palindrome(_lowercase ): break else: lychrel_nums.append(_lowercase ) return len(_lowercase ) if __name__ == "__main__": print(F'''{solution() = }''')
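# --- What the snippet above computes (Project Euler 55): a Lychrel number
# never reaches a palindrome within 50 reverse-and-add steps. Worked
# micro-example: 47 is not Lychrel, since 47 + 74 = 121 is a palindrome after
# one step, while 196 is the classic suspected Lychrel. The published answer
# for the range below 10000 is 249.
n = 47
step = n + int(str(n)[::-1])
assert str(step) == str(step)[::-1]  # 121 reads the same both ways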
4
1
'''simple docstring''' def __UpperCamelCase ( _lowercase ) -> bool: return str(_lowercase ) == str(_lowercase )[::-1] def __UpperCamelCase ( _lowercase ) -> int: return int(_lowercase ) + int(str(_lowercase )[::-1] ) def __UpperCamelCase ( _lowercase = 1_0000 ) -> int: _lowercase : List[str] = [] for num in range(1, _lowercase ): _lowercase : Tuple = 0 _lowercase : Tuple = num while iterations < 50: _lowercase : Union[str, Any] = sum_reverse(_lowercase ) iterations += 1 if is_palindrome(_lowercase ): break else: lychrel_nums.append(_lowercase ) return len(_lowercase ) if __name__ == "__main__": print(F'''{solution() = }''')
4
'''simple docstring''' import argparse from collections import defaultdict def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int: _lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}''' done_test[_id] += 1 with open(_lowercase, 'r' ) as f: _lowercase : Optional[int] = f.readlines() _lowercase : Dict = f'''class {class_name}(''' _lowercase : List[Any] = f'''{4 * " "}def {test_name}(''' _lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}''' _lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}''' _lowercase : Dict = False _lowercase : str = False _lowercase : List[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = 0 _lowercase : Tuple = 0 _lowercase : Optional[int] = [] for line in lines: if line.startswith(_lowercase ): _lowercase : int = True elif in_class and line.startswith(_lowercase ): _lowercase : List[Any] = True elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )): _lowercase : str = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : List[Any] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(f'''{spaces * " "}{correct_line}''' ) _lowercase : Any = False else: new_lines.append(_lowercase ) with open(_lowercase, 'w' ) as f: for line in new_lines: f.write(_lowercase ) def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]: if fail is not None: with open(_lowercase, 'r' ) as f: _lowercase : Any = {l.strip() for l in f.readlines()} else: _lowercase : str = None with open(_lowercase, 'r' ) as f: _lowercase : str = f.readlines() _lowercase : Union[str, Any] = defaultdict(_lowercase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) if __name__ == "__main__": _A : str =argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) _A : Union[str, Any] =parser.parse_args() main(args.correct_filename, args.fail_filename)
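# --- Input formats the script above expects (paths and names here are
# illustrative only):
#   --correct_filename: one `file;class;test;correct line` record per fix,
#     matching the `line.split(';')` in `main`, e.g.
#       tests/models/bert/test_modeling_bert.py;BertModelTest;test_forward;self.assertEqual(result.shape, (1, 8, 768))
#   --fail_filename: one `file::class::test` identifier per failing test,
#     matching the `"::".join(...)` membership check.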
4
1
'''simple docstring''' _A : List[Any] =''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' _A : str =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}] _A : Dict ={ '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
4
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax _A : Optional[int] =logging.get_logger(__name__) @add_end_docstrings(A ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int: '''simple docstring''' super().__init__(**UpperCamelCase_ ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]: '''simple docstring''' return super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = {} if "candidate_labels" in kwargs: _lowercase : Union[str, Any] = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: _lowercase : int = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]: '''simple docstring''' _lowercase : Dict = load_image(UpperCamelCase_ ) _lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) _lowercase : Optional[Any] = candidate_labels _lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels] _lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ ) _lowercase : Any = [text_inputs] return inputs def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = model_inputs.pop('candidate_labels' ) _lowercase : List[str] = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , UpperCamelCase_ ): _lowercase : Optional[int] = text_inputs[0] else: # Batching case. 
_lowercase : List[str] = text_inputs[0][0] _lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Optional[Any] = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = model_outputs.pop('candidate_labels' ) _lowercase : Optional[int] = model_outputs['logits'][0] if self.framework == "pt": _lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 ) _lowercase : Tuple = probs.tolist() if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : List[Any] = [scores] elif self.framework == "tf": _lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 ) _lowercase : List[Any] = probs.numpy().tolist() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _lowercase : List[Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda x : -x[0] ) ] return result
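# --- Hedged end-to-end sketch of the pipeline above;
# `openai/clip-vit-base-patch32` is an assumed but typical checkpoint for
# this task, and the image path is the COCO fixture used by the ViT test
# earlier in this file.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["two cats", "a dog", "a plane"],
    hypothesis_template="This is a photo of {}.",
)
print(result[0]["label"], round(result[0]["score"], 3))  # best label first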
4
1
'''simple docstring''' _A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/''' def __UpperCamelCase ( _lowercase ) -> bytes: # Make sure the supplied data is a bytes-like object if not isinstance(_lowercase, _lowercase ): _lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_lowercase ) _lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data ) _lowercase : Dict = len(_lowercase ) % 6 != 0 if padding_needed: # The padding that will be added later _lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_lowercase ) % 6) else: _lowercase : Optional[int] = B'' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6], 2 )] for index in range(0, len(_lowercase ), 6 ) ).encode() + padding ) def __UpperCamelCase ( _lowercase ) -> bytes: # Make sure encoded_data is either a string or a bytes-like object if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ): _lowercase : int = ( 'argument should be a bytes-like object or ASCII string, ' f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_lowercase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_lowercase, _lowercase ): try: _lowercase : Optional[int] = encoded_data.decode('utf-8' ) except UnicodeDecodeError: raise ValueError('base64 encoded data should only contain ASCII characters' ) _lowercase : Optional[int] = encoded_data.count('=' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one _lowercase : str = encoded_data[:-padding] _lowercase : Tuple = ''.join( bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: _lowercase : Union[str, Any] = ''.join( bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data ) _lowercase : List[str] = [ int(binary_stream[index : index + 8], 2 ) for index in range(0, len(_lowercase ), 8 ) ] return bytes(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
4
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def __UpperCamelCase ( _lowercase ) -> None: _lowercase , _lowercase : List[Any] = analyze_text(_lowercase ) _lowercase : Any = list(' ' + ascii_lowercase ) # what is our total sum of probabilities. _lowercase : Union[str, Any] = sum(single_char_strings.values() ) # one length string _lowercase : Union[str, Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: _lowercase : Any = single_char_strings[ch] _lowercase : int = my_str / all_sum my_fir_sum += prob * math.loga(_lowercase ) # entropy formula. # print entropy print(f'''{round(-1 * my_fir_sum ):.1f}''' ) # two len string _lowercase : str = sum(two_char_strings.values() ) _lowercase : str = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: _lowercase : Optional[Any] = cha + cha if sequence in two_char_strings: _lowercase : int = two_char_strings[sequence] _lowercase : Optional[int] = int(_lowercase ) / all_sum my_sec_sum += prob * math.loga(_lowercase ) # print second entropy print(f'''{round(-1 * my_sec_sum ):.1f}''' ) # print the difference between them print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' ) def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]: _lowercase : Optional[Any] = Counter() # type: ignore _lowercase : List[Any] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0, len(_lowercase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def __UpperCamelCase ( ) -> List[Any]: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
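# --- Dependency-light cross-check for the routine above: first-order Shannon
# entropy is 0 bits for a single repeated symbol and exactly 1 bit for a
# uniform two-symbol text.
import math
from collections import Counter

def first_order_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

assert first_order_entropy("aaaa") == 0.0
assert abs(first_order_entropy("abab") - 1.0) < 1e-9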
4
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """microsoft/speecht5_tts""" A_ = ( """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """ """text to read (in English) and returns a waveform object containing the sound.""" ) A_ = """text_reader""" A_ = SpeechTaProcessor A_ = SpeechTaForTextToSpeech A_ = SpeechTaHifiGan A_ = ["""text"""] A_ = ["""audio"""] def __UpperCAmelCase ( self : List[Any] ) -> Any: '''simple docstring''' if self.post_processor is None: _lowercase : List[str] = 'microsoft/speecht5_hifigan' super().setup() def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=None ) -> Dict: '''simple docstring''' _lowercase : List[str] = self.pre_processor(text=UpperCamelCase_ , return_tensors='pt' , truncation=UpperCamelCase_ ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' ) _lowercase : Dict = load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation' ) _lowercase : Optional[Any] = torch.tensor(embeddings_dataset[7305]['xvector'] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Union[str, Any] ) -> Tuple: '''simple docstring''' with torch.no_grad(): return self.model.generate_speech(**UpperCamelCase_ ) def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Any ) -> int: '''simple docstring''' with torch.no_grad(): return self.post_processor(UpperCamelCase_ ).cpu().detach()
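# --- Hedged sketch of driving the same models directly, mirroring the tool's
# encode/forward/decode steps. The un-obfuscated class names are
# `SpeechT5Processor`, `SpeechT5ForTextToSpeech`, and `SpeechT5HifiGan`; the
# speaker index 7305 comes from the snippet itself.
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, world.", return_tensors="pt")
embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker = torch.tensor(embeddings[7305]["xvector"]).unsqueeze(0)
with torch.no_grad():
    speech = model.generate_speech(inputs["input_ids"], speaker, vocoder=vocoder)
print(speech.shape)  # 1-D waveform tensor at 16 kHz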
4
'''simple docstring''' import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : int ) -> Union[str, Any]: '''simple docstring''' _lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' ) _lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' ) _lowercase : List[Any] = 'The dog is cute and lives in the garden house' _lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] ) _lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim _lowercase : Tuple = jnp.array( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) _lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state'] self.assertEqual(output.shape , UpperCamelCase_ ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
4
1
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version _A : Union[str, Any] =logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') _A : List[Any] ={ '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization _A : Optional[int] ={ '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } _A : Any =sorted(arg_to_scheduler.keys()) _A : Dict ='''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class lowerCamelCase__ ( pl.LightningModule ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : argparse.Namespace , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : List[str]="base" , UpperCamelCase_ : int=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Tuple , ) -> Optional[int]: '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(UpperCamelCase_ ) _lowercase : List[Any] = 0 _lowercase : Any = Path(self.hparams.output_dir ) _lowercase : str = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _lowercase : List[Any] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase_ , **UpperCamelCase_ , ) else: _lowercase : PretrainedConfig = config _lowercase : Dict = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , UpperCamelCase_ , UpperCamelCase_ ): assert hasattr(self.config , UpperCamelCase_ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , UpperCamelCase_ , getattr(self.hparams , UpperCamelCase_ ) ) if tokenizer is None: _lowercase : Union[str, Any] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase_ , ) else: _lowercase : PreTrainedTokenizer = tokenizer _lowercase : Any = 
MODEL_MODES[mode] if model is None: _lowercase : Union[str, Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase_ , ) else: _lowercase : str = model def __UpperCAmelCase ( self : Union[str, Any] , *UpperCamelCase_ : int , **UpperCamelCase_ : str ) -> Optional[int]: '''simple docstring''' _lowercase : List[str] = self.model_type.from_pretrained(*UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple ) -> str: '''simple docstring''' _lowercase : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] _lowercase : List[str] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _lowercase : Optional[int] = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def __UpperCAmelCase ( self : Any ) -> int: '''simple docstring''' _lowercase : Tuple = self.model _lowercase : int = ['bias', 'LayerNorm.weight'] _lowercase : int = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check these named parameters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: _lowercase : Dict = Adafactor( UpperCamelCase_ , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase_ , relative_step=UpperCamelCase_ ) else: _lowercase : str = AdamW( UpperCamelCase_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _lowercase : Any = optimizer _lowercase : str = self.get_lr_scheduler() return [optimizer], [scheduler] def __UpperCAmelCase ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] ) -> int: '''simple docstring''' return self.validation_step(UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : str ) -> str: '''simple docstring''' return self.validation_end(UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple ) -> int: '''simple docstring''' _lowercase : Union[str, Any] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _lowercase : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : List[Any] ) -> str: '''simple docstring''' if stage == "test": _lowercase : str = len(self.test_dataloader().dataset ) else: _lowercase : Union[str, Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=UpperCamelCase_ ) _lowercase : str = len(self.train_dataloader().dataset ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : bool = False ) -> List[Any]: '''simple docstring''' raise NotImplementedError('You must implement this for your task' ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' return self.train_loader def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple: '''simple docstring''' return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Tuple ) -> List[str]:
'''simple docstring''' return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( UpperCamelCase_ , list(filter(UpperCamelCase_ , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Dict[str, Any] ) -> None: '''simple docstring''' _lowercase : Tuple = self.output_dir.joinpath('best_tfmr' ) _lowercase : List[str] = self.step_count self.model.save_pretrained(UpperCamelCase_ ) self.tokenizer.save_pretrained(UpperCamelCase_ ) @staticmethod def __UpperCAmelCase ( UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Optional[int]: '''simple docstring''' parser.add_argument( '--model_name_or_path' , default=UpperCamelCase_ , type=UpperCamelCase_ , required=UpperCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=UpperCamelCase_ , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=UpperCamelCase_ , type=UpperCamelCase_ , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(UpperCamelCase_ ).parent / 'test_run' / 'cache' ) , type=UpperCamelCase_ , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=UpperCamelCase_ , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=UpperCamelCase_ , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=UpperCamelCase_ , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=UpperCamelCase_ , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=UpperCamelCase_ , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=UpperCamelCase_ , metavar=UpperCamelCase_ , type=UpperCamelCase_ , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=UpperCamelCase_ , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=UpperCamelCase_ , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=UpperCamelCase_ , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=UpperCamelCase_ , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=UpperCamelCase_ ) parser.add_argument('--train_batch_size' , default=32 , type=UpperCamelCase_ ) parser.add_argument('--eval_batch_size' , default=32 , type=UpperCamelCase_ ) parser.add_argument('--adafactor' , action='store_true' ) class lowerCamelCase__ ( pl.Callback ): '''simple docstring''' def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] ) -> str: '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on the master worker with RAY. In newer pytorch-lightning, accelerators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowerCamelCase__ ( pl.Callback ): '''simple docstring''' def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(UpperCamelCase_ ) class lowerCamelCase__ ( pl.Callback ): '''simple docstring''' def __UpperCAmelCase ( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] ) -> List[str]: '''simple docstring''' _lowercase : List[str] = trainer.lr_schedulers[0]['scheduler'] _lowercase : Union[str, Any] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(UpperCamelCase_ ) def __UpperCAmelCase ( self : str , UpperCamelCase_ : pl.Trainer , UpperCamelCase_ : pl.LightningModule ) -> Any: '''simple docstring''' rank_zero_info('***** Validation results *****' ) _lowercase : Optional[int] = trainer.callback_metrics # Log results for key in sorted(UpperCamelCase_ ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(UpperCamelCase_ , str(metrics[key] ) ) ) def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : pl.Trainer , UpperCamelCase_ : pl.LightningModule ) -> List[str]: '''simple docstring''' rank_zero_info('***** Test results *****' ) _lowercase : int = trainer.callback_metrics # Log and save results to file _lowercase : List[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(UpperCamelCase_ , 'w' ) as writer: for key in sorted(UpperCamelCase_ ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(UpperCamelCase_ , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(UpperCamelCase_ , str(metrics[key] ) ) ) def __UpperCamelCase ( _lowercase, _lowercase ) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '--output_dir', default=str(Path(_lowercase ).parent / 'test_run' / 'model_checkpoints' ), type=_lowercase, help='The output directory where the model predictions and checkpoints will be written.', ) parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', ) parser.add_argument( '--fp16_opt_level', type=_lowercase, default='O2', help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' ' See details at https://nvidia.github.io/apex/amp.html' ), ) parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=_lowercase ) parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=_lowercase, help='Max gradient norm' ) parser.add_argument('--do_train', action='store_true', help='Whether to run training.' ) parser.add_argument('--do_predict', action='store_true', help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps', dest='accumulate_grad_batches', type=_lowercase, default=1, help='Number of update steps to accumulate before performing a backward/update pass.', ) parser.add_argument('--seed', type=_lowercase, default=42, help='random seed for initialization' ) parser.add_argument( '--data_dir', default=str(Path(_lowercase ).parent / 'test_run' / 'dummy-train-data' ), type=_lowercase, help='The input data dir.
Should contain the training files for the CoNLL-2003 NER task.', ) def __UpperCamelCase ( _lowercase, _lowercase, _lowercase=None, _lowercase=True, _lowercase=[], _lowercase=None, _lowercase=None, **_lowercase, ) -> int: pl.seed_everything(args.seed ) # init model _lowercase : Any = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_lowercase ) # add custom checkpoints if checkpoint_callback is None: _lowercase : str = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix='checkpoint', monitor='val_loss', mode='min', save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_lowercase ) if logging_callback is None: _lowercase : str = LoggingCallback() _lowercase : Optional[int] = {} if args.fpaa: _lowercase : Any = 16 if args.gpus > 1: _lowercase : List[Any] = 'auto' _lowercase : List[Any] = 'ddp' _lowercase : Dict = args.accumulate_grad_batches _lowercase : List[str] = None _lowercase : List[str] = 'auto' _lowercase : Any = pl.Trainer.from_argparse_args( _lowercase, weights_summary=_lowercase, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=_lowercase, val_check_interval=1, num_sanity_val_steps=2, **_lowercase, ) if args.do_train: trainer.fit(_lowercase ) else: print('RAG modeling tests with new set functions successfully executed!' ) return trainer
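The `get_lr_scheduler` method above resolves the `--lr_scheduler` choice through a module-level dispatch table of transformers schedule factories. A minimal, self-contained sketch of that mechanism (the table and variable names here are illustrative, not the obfuscated constants from the listing):

import torch
from transformers.optimization import (
    get_cosine_schedule_with_warmup,
    get_linear_schedule_with_warmup,
)

# Hypothetical dispatch table mirroring the one defined at module level above.
scheduler_table = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
}

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

# Pick the factory by CLI choice and step the schedule once per optimizer step,
# matching the {'scheduler': ..., 'interval': 'step', 'frequency': 1} dict returned above.
scheduler = scheduler_table['linear'](optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(100):
    optimizer.step()
    scheduler.step()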
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _A : int =logging.get_logger(__name__) _A : Union[str, Any] ={ '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''', } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip_vision_model""" def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any: '''simple docstring''' super().__init__(**UpperCamelCase_ ) _lowercase : Optional[Any] = hidden_size _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = num_hidden_layers _lowercase : str = num_attention_heads _lowercase : Tuple = patch_size _lowercase : Dict = image_size _lowercase : Optional[int] = initializer_range _lowercase : List[Any] = attention_dropout _lowercase : int = layer_norm_eps _lowercase : Optional[int] = hidden_act _lowercase : str = qkv_bias @classmethod def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase_ ) _lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": _lowercase : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}.
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip_qformer""" def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Dict = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : List[Any] = num_attention_heads _lowercase : Optional[int] = hidden_act _lowercase : Union[str, Any] = intermediate_size _lowercase : List[Any] = hidden_dropout_prob _lowercase : Dict = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : Optional[int] = initializer_range _lowercase : Tuple = layer_norm_eps _lowercase : List[str] = position_embedding_type _lowercase : str = cross_attention_frequency _lowercase : int = encoder_hidden_size @classmethod def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase_ ) _lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": _lowercase : Optional[int] = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip""" A_ = True def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' super().__init__(**UpperCamelCase_ ) if vision_config is None: _lowercase : Any = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: _lowercase : List[Any] = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: _lowercase : List[Any] = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) _lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ ) _lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ ) _lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt' _lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ ) _lowercase : str = self.text_config.tie_word_embeddings _lowercase : int = self.text_config.is_encoder_decoder _lowercase : Tuple = num_query_tokens _lowercase : str = self.vision_config.hidden_size _lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowercase : List[Any] = 1.0 _lowercase : int = 0.02 @classmethod def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , ) def __UpperCAmelCase ( self : Dict ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = copy.deepcopy(self.__dict__ ) _lowercase : Optional[int] = self.vision_config.to_dict() _lowercase : Optional[Any] = self.qformer_config.to_dict() _lowercase : Tuple = self.text_config.to_dict() _lowercase : Dict = self.__class__.model_type return output
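Assuming the three classes above correspond to `InstructBlipVisionConfig`, `InstructBlipQFormerConfig` and `InstructBlipConfig` from transformers (>= 4.31), whose fields they match, a small sketch of composing a full config:

from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

# Build the sub-configs explicitly instead of relying on the empty-dict defaults.
vision_config = InstructBlipVisionConfig(hidden_size=1408, num_hidden_layers=39)
qformer_config = InstructBlipQFormerConfig(hidden_size=768)
text_config = OPTConfig()

config = InstructBlipConfig.from_vision_qformer_text_configs(
    vision_config, qformer_config, text_config, num_query_tokens=32
)
# __init__ ties the Q-Former's cross-attention width to the vision hidden size.
assert config.qformer_config.encoder_hidden_size == 1408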
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase__ ( A ): '''simple docstring''' A_ = 42 class lowerCamelCase__ ( A , A ): '''simple docstring''' @register_to_config def __init__( self : Any , UpperCamelCase_ : int = 3 , UpperCamelCase_ : int = 3 , UpperCamelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCamelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCamelCase_ : Tuple[int] = (64,) , UpperCamelCase_ : int = 1 , UpperCamelCase_ : str = "silu" , UpperCamelCase_ : int = 3 , UpperCamelCase_ : int = 32 , UpperCamelCase_ : int = 256 , UpperCamelCase_ : int = 32 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : float = 0.1_82_15 , UpperCamelCase_ : str = "group" , ) -> Optional[Any]: '''simple docstring''' super().__init__() # pass init params to Encoder _lowercase : List[str] = Encoder( in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , down_block_types=UpperCamelCase_ , block_out_channels=UpperCamelCase_ , layers_per_block=UpperCamelCase_ , act_fn=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , double_z=UpperCamelCase_ , ) _lowercase : Any = vq_embed_dim if vq_embed_dim is not None else latent_channels _lowercase : Tuple = nn.Convad(UpperCamelCase_ , UpperCamelCase_ , 1 ) _lowercase : List[str] = VectorQuantizer(UpperCamelCase_ , UpperCamelCase_ , beta=0.25 , remap=UpperCamelCase_ , sane_index_shape=UpperCamelCase_ ) _lowercase : Optional[Any] = nn.Convad(UpperCamelCase_ , UpperCamelCase_ , 1 ) # pass init params to Decoder _lowercase : Tuple = Decoder( in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , up_block_types=UpperCamelCase_ , block_out_channels=UpperCamelCase_ , layers_per_block=UpperCamelCase_ , act_fn=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , norm_type=UpperCamelCase_ , ) @apply_forward_hook def __UpperCAmelCase ( self : Any , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = True ) -> VQEncoderOutput: '''simple docstring''' _lowercase : List[str] = self.encoder(UpperCamelCase_ ) _lowercase : Optional[int] = self.quant_conv(UpperCamelCase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=UpperCamelCase_ ) @apply_forward_hook def __UpperCAmelCase ( self : Any , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' if not force_not_quantize: _lowercase , _lowercase , _lowercase : int = self.quantize(UpperCamelCase_ ) else: _lowercase : Optional[Any] = h _lowercase : Tuple = self.post_quant_conv(UpperCamelCase_ ) _lowercase : List[str] = self.decoder(UpperCamelCase_ , quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' _lowercase : Any = sample _lowercase : Optional[int] = self.encode(UpperCamelCase_ ).latents _lowercase : List[Any] = self.decode(UpperCamelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=UpperCamelCase_ )
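The autoencoder above mirrors diffusers' `VQModel`; assuming that correspondence, a tiny encode/decode round trip (config values chosen small so it runs quickly on CPU) looks like:

import torch
from diffusers import VQModel

model = VQModel(
    in_channels=3,
    out_channels=3,
    block_out_channels=(32,),
    latent_channels=3,
    num_vq_embeddings=64,
    norm_num_groups=16,
)
model.eval()

sample = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = model.encode(sample).latents  # encoder + quant_conv
    recon = model.decode(latents).sample    # quantize + post_quant_conv + decoder
assert recon.shape == sample.shape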
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): _A : List[str] ='''pt''' elif is_tf_available(): _A : Tuple ='''tf''' else: _A : Optional[int] ='''jax''' class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' A_ = ByTaTokenizer A_ = False def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' super().setUp() _lowercase : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __UpperCAmelCase ( self : int ) -> int: '''simple docstring''' return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer: '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]: '''simple docstring''' _lowercase : Dict = [] for i in range(len(UpperCamelCase_ ) ): try: _lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) ) _lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) ) if max_length is not None and len(UpperCamelCase_ ) > max_length: _lowercase : List[Any] = toks[:max_length] if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0: while len(UpperCamelCase_ ) < min_length: _lowercase : Tuple = toks + toks # toks_str = [t[1] for t in toks] _lowercase : Dict = [t[0] for t in toks] # Ensure consistency _lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) if " " not in output_txt and len(UpperCamelCase_ ) > 1: _lowercase : Any = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ ) ) if with_prefix_space: _lowercase : Union[str, Any] = ' ' + output_txt _lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) return output_txt, output_ids def __UpperCAmelCase ( self : Optional[int] ) -> int: '''simple docstring''' _lowercase : List[str] = self.ta_base_tokenizer _lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) _lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' _lowercase : Optional[int] = self.ta_base_tokenizer _lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ ) _lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , UpperCamelCase_ ) # decoding _lowercase : List[str] = tokenizer.decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' ) _lowercase : Any = tokenizer('e è é ê ë' ) _lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , UpperCamelCase_ ) # decoding _lowercase : Tuple = tokenizer.decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __UpperCAmelCase ( self : Tuple ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = self.ta_base_tokenizer _lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) if FRAMEWORK != "jax": _lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] ) else: _lowercase : List[str] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __UpperCAmelCase ( self : Optional[int] ) -> str: '''simple docstring''' _lowercase : Union[str, Any] = self.ta_base_tokenizer _lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , UpperCamelCase_ ) self.assertIn('attention_mask' , UpperCamelCase_ ) self.assertNotIn('decoder_input_ids' , UpperCamelCase_ ) self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ ) def __UpperCAmelCase ( self : Any ) -> int: '''simple docstring''' _lowercase : Tuple = self.ta_base_tokenizer _lowercase : Optional[Any] = [ 'Summary of the text.', 'Another summary.', ] _lowercase : str = tokenizer( text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __UpperCAmelCase ( self : Dict ) -> Tuple: '''simple docstring''' _lowercase : str = self.ta_base_tokenizer _lowercase : str = ['A long paragraph for summarization. </s>'] _lowercase : Optional[int] = ['Summary of the text.
</s>'] # fmt: off _lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] ) self.assertEqual(UpperCamelCase_ , batch['labels'][0] ) def __UpperCAmelCase ( self : List[str] ) -> int: '''simple docstring''' _lowercase : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _lowercase : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : List[Any] = tempfile.mkdtemp() _lowercase : Any = ' He is very happy, UNwant\u00E9d,running' _lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) _lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ ) _lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) shutil.rmtree(UpperCamelCase_ ) _lowercase : str = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : Dict = tempfile.mkdtemp() _lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) _lowercase : Optional[int] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) _lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ ) _lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> Tuple: '''simple docstring''' _lowercase : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _lowercase : int = json.load(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 
encoding='utf-8' ) as json_file: _lowercase : Tuple = json.load(UpperCamelCase_ ) _lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )] _lowercase : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] _lowercase : int = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowercase : Optional[Any] = tokenizer_class.from_pretrained( UpperCamelCase_ , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )] _lowercase : Tuple = tokenizer_class.from_pretrained( UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __UpperCAmelCase ( self : List[str] ) -> str: '''simple docstring''' _lowercase : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase_ ) _lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __UpperCAmelCase ( self : Optional[int] ) -> int: '''simple docstring''' pass def __UpperCAmelCase ( self : str ) -> Tuple: '''simple docstring''' pass def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: '''simple docstring''' pass def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' pass def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] _lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] ) -> str: '''simple docstring''' _lowercase : Dict = self.get_tokenizers() for tokenizer in tokenizers: with 
self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowercase : Optional[int] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] _lowercase : Optional[int] = 0 _lowercase : int = tokenizer.convert_ids_to_tokens( UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) for attr in attributes_list: setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ ) setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ ) setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] ) setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
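The expected id lists in these tests follow ByT5's byte-level scheme: each UTF-8 byte is offset by the 3 leading special tokens (pad=0, eos=1, unk=2), and an eos byte is appended. A quick sketch, assuming the class under test is transformers' `ByT5Tokenizer`:

from transformers import ByT5Tokenizer

tokenizer = ByT5Tokenizer()
ids = tokenizer('hi').input_ids
# ord('h') + 3 = 107, ord('i') + 3 = 108, then </s> = 1
assert ids == [107, 108, 1]
assert tokenizer.decode(ids) == 'hi</s>'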
'''simple docstring''' _A : Dict =''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' _A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}] _A : Dict ={ '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def __UpperCamelCase ( _lowercase ) -> int: if isinstance(_lowercase, collections.abc.Iterable ): return x return (x, x) @require_flax class lowerCamelCase__ : '''simple docstring''' def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] ) -> Optional[Any]: '''simple docstring''' pass def __UpperCAmelCase ( self : int ) -> Optional[int]: '''simple docstring''' pass def __UpperCAmelCase ( self : Any ) -> Optional[Any]: '''simple docstring''' pass def __UpperCAmelCase ( self : Any , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : float ) -> Tuple: '''simple docstring''' _lowercase : List[str] = np.abs((a - b) ).max() self.assertLessEqual(UpperCamelCase_ , UpperCamelCase_ , F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , **UpperCamelCase_ : Any ) -> Optional[int]: '''simple docstring''' _lowercase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(UpperCamelCase_ ) _lowercase : int = model(input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def __UpperCAmelCase ( self : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : List[Any] ) -> Union[str, Any]: '''simple docstring''' _lowercase , _lowercase : List[str] = self.get_vision_text_model(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : List[Any] = {'vision_model': vision_model, 'text_model': text_model} _lowercase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase_ ) _lowercase : Tuple = model(input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __UpperCAmelCase ( self : 
List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any=None , **UpperCamelCase_ : int ) -> List[Any]: '''simple docstring''' _lowercase , _lowercase : Union[str, Any] = self.get_vision_text_model(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : List[Any] = {'vision_model': vision_model, 'text_model': text_model} _lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase_ ) _lowercase : Any = model(input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) _lowercase : List[Any] = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase_ ) _lowercase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase_ ) _lowercase : Union[str, Any] = model(input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) _lowercase : Union[str, Any] = after_output[0] _lowercase : List[str] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase_ , 1E-3 ) def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[Any] ) -> Dict: '''simple docstring''' _lowercase , _lowercase : Dict = self.get_vision_text_model(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : Optional[int] = {'vision_model': vision_model, 'text_model': text_model} _lowercase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase_ ) _lowercase : Tuple = model( input_ids=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , output_attentions=UpperCamelCase_ ) _lowercase : Any = output.vision_model_output.attentions self.assertEqual(len(UpperCamelCase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _lowercase : str = to_atuple(vision_model.config.image_size ) _lowercase : Dict = to_atuple(vision_model.config.patch_size ) _lowercase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _lowercase : int = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _lowercase : int = output.text_model_output.attentions self.assertEqual(len(UpperCamelCase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple ) -> int: '''simple docstring''' pt_model.to(UpperCamelCase_ ) pt_model.eval() # prepare inputs _lowercase : List[str] = inputs_dict _lowercase : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _lowercase : Tuple = pt_model(**UpperCamelCase_ ).to_tuple() _lowercase : str = fx_model(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(UpperCamelCase_ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCamelCase_ ) _lowercase : List[str] 
= FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) _lowercase : Union[str, Any] = fx_model_loaded(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(UpperCamelCase_ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCamelCase_ ) _lowercase : Optional[int] = VisionTextDualEncoderModel.from_pretrained(UpperCamelCase_ , from_flax=UpperCamelCase_ ) pt_model_loaded.to(UpperCamelCase_ ) pt_model_loaded.eval() with torch.no_grad(): _lowercase : List[str] = pt_model_loaded(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(UpperCamelCase_ , pt_output_loaded.numpy() , 4E-2 ) def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any ) -> str: '''simple docstring''' _lowercase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : Union[str, Any] = VisionTextDualEncoderModel(UpperCamelCase_ ) _lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(UpperCamelCase_ ) _lowercase : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase_ ) _lowercase : List[Any] = fx_state self.check_pt_flax_equivalence(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : List[str] = VisionTextDualEncoderModel(UpperCamelCase_ ) _lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(UpperCamelCase_ ) _lowercase : Tuple = load_flax_weights_in_pytorch_model(UpperCamelCase_ , fx_model.params ) self.check_pt_flax_equivalence(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] ) -> Any: '''simple docstring''' _lowercase : Optional[int] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple ) -> Tuple: '''simple docstring''' _lowercase : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase_ ) def __UpperCAmelCase ( self : str ) -> Dict: '''simple docstring''' _lowercase : List[str] = self.prepare_config_and_inputs() self.check_save_load(**UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' _lowercase : int = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**UpperCamelCase_ ) @is_pt_flax_cross_test def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _lowercase : Union[str, Any] = self.prepare_config_and_inputs() _lowercase : List[Any] = config_inputs_dict.pop('vision_config' ) _lowercase : List[str] = config_inputs_dict.pop('text_config' ) _lowercase : List[Any] = config_inputs_dict self.check_equivalence_pt_to_flax(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) 
self.check_equivalence_flax_to_pt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) @slow def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict: '''simple docstring''' _lowercase , _lowercase : List[str] = self.get_pretrained_model_and_inputs() _lowercase : int = model_a(**UpperCamelCase_ ) _lowercase : int = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(UpperCamelCase_ ) _lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase_ ) _lowercase : int = model_a(**UpperCamelCase_ ) _lowercase : Any = after_outputs[0] _lowercase : List[str] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase_ , 1E-5 ) @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' _lowercase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=UpperCamelCase_ , text_from_pt=UpperCamelCase_ , ) _lowercase : Union[str, Any] = 13 _lowercase : int = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _lowercase : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _lowercase : Tuple = random_attention_mask([batch_size, 4] ) _lowercase : str = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple ) -> List[Any]: '''simple docstring''' _lowercase : Optional[Any] = FlaxViTModel(UpperCamelCase_ ) _lowercase : Optional[Any] = FlaxBertModel(UpperCamelCase_ ) return vision_model, text_model def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' _lowercase : int = FlaxViTModelTester(self ) _lowercase : Optional[int] = FlaxBertModelTester(self ) _lowercase : List[Any] = vit_model_tester.prepare_config_and_inputs() _lowercase : Any = bert_model_tester.prepare_config_and_inputs() _lowercase , _lowercase : Optional[Any] = vision_config_and_inputs _lowercase , _lowercase , _lowercase , _lowercase : Any = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : Any ) -> str: '''simple docstring''' _lowercase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=UpperCamelCase_ , text_from_pt=UpperCamelCase_ , ) _lowercase : Tuple = 13 _lowercase : List[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _lowercase : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _lowercase : Union[str, Any] = random_attention_mask([batch_size, 4] ) _lowercase : Union[str, Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : 
Optional[Any] ) -> int: '''simple docstring''' _lowercase : List[str] = FlaxCLIPVisionModel(UpperCamelCase_ ) _lowercase : str = FlaxBertModel(UpperCamelCase_ ) return vision_model, text_model def __UpperCAmelCase ( self : Tuple ) -> Any: '''simple docstring''' _lowercase : Optional[Any] = FlaxCLIPVisionModelTester(self ) _lowercase : Optional[Any] = FlaxBertModelTester(self ) _lowercase : Any = clip_model_tester.prepare_config_and_inputs() _lowercase : str = bert_model_tester.prepare_config_and_inputs() _lowercase , _lowercase : Tuple = vision_config_and_inputs _lowercase , _lowercase , _lowercase , _lowercase : Tuple = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : str ) -> Tuple: '''simple docstring''' _lowercase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 ) _lowercase : int = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) _lowercase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) _lowercase : Union[str, Any] = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='np' ) _lowercase : Dict = model(**UpperCamelCase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _lowercase : Union[str, Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 ) )
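A condensed version of the pattern these tests exercise, assuming the same tiny Hub checkpoints used above are reachable:

import numpy as np
from transformers import FlaxVisionTextDualEncoderModel

model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
    'hf-internal-testing/tiny-random-vit',
    'hf-internal-testing/tiny-bert',
    vision_from_pt=True,
    text_from_pt=True,
)

size = model.config.vision_config.image_size
channels = model.config.vision_config.num_channels
pixel_values = np.random.randn(1, channels, size, size).astype('float32')
input_ids = np.ones((1, 4), dtype='int32')

outputs = model(input_ids=input_ids, pixel_values=pixel_values)
# one image vs one text -> a single similarity logit in each direction
assert outputs.logits_per_image.shape == (1, 1)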
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : int = torch.exp(_lowercase ) _lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i) _lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i) return torch.log(_lowercase ) - B / A class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]: '''simple docstring''' super().__init__() _lowercase : int = config.output_attentions _lowercase : int = config.output_hidden_states _lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] ) _lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] ) _lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )] def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int: '''simple docstring''' if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int): for i in range(len(self.early_exit_entropy ) ): _lowercase : Optional[Any] = x else: _lowercase : Optional[int] = x def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict: '''simple docstring''' _lowercase : Optional[int] = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]: '''simple docstring''' _lowercase : int = () _lowercase : List[Any] = () _lowercase : Tuple = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: _lowercase : Optional[int] = all_hidden_states + (hidden_states,) _lowercase : str = layer_module( UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : List[str] = layer_outputs[0] if self.output_attentions: _lowercase : Tuple = all_attentions + (layer_outputs[1],) _lowercase : Optional[int] = (hidden_states,) if self.output_hidden_states: _lowercase : str = current_outputs + (all_hidden_states,) if self.output_attentions: _lowercase : Optional[int] = current_outputs + (all_attentions,) _lowercase : List[Any] = self.highway[i](UpperCamelCase_ ) # logits, pooled_output if not self.training: _lowercase : Dict = highway_exit[0] _lowercase : Tuple = entropy(UpperCamelCase_ ) _lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy _lowercase : str = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: _lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(UpperCamelCase_ , i + 1 ) else: _lowercase : Optional[int] = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: _lowercase : str = all_hidden_states + (hidden_states,) _lowercase : Optional[Any] = (hidden_states,) if self.output_hidden_states: _lowercase : Dict = outputs + (all_hidden_states,) if 
self.output_attentions: _lowercase : Optional[Any] = outputs + (all_attentions,) _lowercase : Optional[int] = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( """The Bert Model transformer with early exiting (DeeBERT). """ , A , ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' super().__init__(UpperCamelCase_ ) _lowercase : int = config _lowercase : int = BertEmbeddings(UpperCamelCase_ ) _lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ ) _lowercase : Any = BertPooler(UpperCamelCase_ ) self.init_weights() def __UpperCAmelCase ( self : int ) -> Union[str, Any]: '''simple docstring''' self.encoder.init_highway_pooler(self.pooler ) def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' return self.embeddings.word_embeddings def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any: '''simple docstring''' _lowercase : Optional[Any] = value def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]: '''simple docstring''' for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]: '''simple docstring''' if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: _lowercase : Any = input_ids.size() elif inputs_embeds is not None: _lowercase : Any = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) _lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: _lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if encoder_attention_mask is None: _lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: _lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: _lowercase : int = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: _lowercase : int = encoder_attention_mask[:, None, None, :] _lowercase : str = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility _lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0 # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] _lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) _lowercase : Dict = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) _lowercase : List[Any] = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) _lowercase : int = encoder_outputs[0] _lowercase : str = self.pooler(UpperCamelCase_ ) _lowercase : List[Any] = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]: '''simple docstring''' _lowercase : Any = message _lowercase : Dict = exit_layer # start from 1! class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict: '''simple docstring''' super().__init__() _lowercase : Optional[Any] = BertPooler(UpperCamelCase_ ) _lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob ) _lowercase : int = nn.Linear(config.hidden_size , config.num_labels ) def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]: '''simple docstring''' _lowercase : str = encoder_outputs[0] _lowercase : int = self.pooler(UpperCamelCase_ ) # "return" pooler_output # BertModel _lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification _lowercase : Dict = bmodel_output[1] _lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ ) _lowercase : str = self.classifier(UpperCamelCase_ ) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. 
""" , A , ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]: '''simple docstring''' super().__init__(UpperCamelCase_ ) _lowercase : Dict = config.num_labels _lowercase : Any = config.num_hidden_layers _lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ ) _lowercase : Any = nn.Dropout(config.hidden_dropout_prob ) _lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple: '''simple docstring''' _lowercase : Union[str, Any] = self.num_layers try: _lowercase : Tuple = self.bert( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits _lowercase : List[Any] = outputs[1] _lowercase : int = self.dropout(UpperCamelCase_ ) _lowercase : Optional[int] = self.classifier(UpperCamelCase_ ) _lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowercase : Union[str, Any] = e.message _lowercase : Any = e.exit_layer _lowercase : Optional[int] = outputs[0] if not self.training: _lowercase : Union[str, Any] = entropy(UpperCamelCase_ ) _lowercase : Tuple = [] _lowercase : Tuple = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowercase : Tuple = MSELoss() _lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _lowercase : Union[str, Any] = CrossEntropyLoss() _lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _lowercase : Optional[Any] = [] for highway_exit in outputs[-1]: _lowercase : Optional[Any] = highway_exit[0] if not self.training: highway_logits_all.append(UpperCamelCase_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _lowercase : Union[str, Any] = MSELoss() _lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _lowercase : Dict = CrossEntropyLoss() _lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(UpperCamelCase_ ) if train_highway: _lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _lowercase : Optional[Any] = (loss,) + outputs if not self.training: _lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _lowercase : Dict = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
4
1
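# A quick sanity check on the `entropy` helper defined in the DeeBERT file above:
# log(sum exp(x)) - sum(x * exp(x)) / sum(exp(x)) is algebraically the Shannon
# entropy (in nats) of softmax(x), which DeeBERT compares against a per-layer
# threshold to decide whether an early exit fires.
import torch

def entropy(x):
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)        # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)    # sum of x_i * exp(x_i)
    return torch.log(A) - B / A

logits = torch.randn(3, 5)
p = torch.softmax(logits, dim=1)
direct = -(p * p.log()).sum(dim=1)     # H = -sum p_i log p_i
assert torch.allclose(entropy(logits), direct, atol=1e-5)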
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Optional[Any] =logging.get_logger(__name__) _A : Optional[int] ={ '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """markuplm""" def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]: '''simple docstring''' super().__init__( pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : List[Any] = vocab_size _lowercase : Union[str, Any] = hidden_size _lowercase : Dict = num_hidden_layers _lowercase : Optional[Any] = num_attention_heads _lowercase : Dict = hidden_act _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : List[Any] = type_vocab_size _lowercase : Union[str, Any] = initializer_range _lowercase : Optional[int] = layer_norm_eps _lowercase : Optional[Any] = position_embedding_type _lowercase : str = use_cache _lowercase : str = classifier_dropout # additional properties _lowercase : int = max_depth _lowercase : Dict = max_xpath_tag_unit_embeddings _lowercase : str = max_xpath_subs_unit_embeddings _lowercase : List[str] = tag_pad_id _lowercase : Optional[int] = subs_pad_id _lowercase : Any = xpath_unit_hidden_size
4
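# A minimal usage sketch for the configuration class above; the override values
# are arbitrary examples, not recommended settings.
from transformers import MarkupLMConfig

config = MarkupLMConfig(max_depth=30, xpath_unit_hidden_size=16)
assert config.model_type == "markuplm"
assert config.max_depth == 30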
'''simple docstring'''
import unittest

from knapsack import greedy_knapsack as kp


class TestGreedyKnapsack(unittest.TestCase):
    '''Test cases for the greedy knapsack helper.'''

    def test_sorted(self):
        '''calc_profit takes (profit, weight, max_weight) and returns the maximum profit.'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        '''A negative max_weight must raise a ValueError.'''
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        '''A negative weight value must raise a ValueError.'''
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        '''A negative profit value must raise a ValueError.'''
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        '''A max_weight of zero must raise a ValueError.'''
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        '''profit and weight lists of different lengths must raise a ValueError.'''
        self.assertRaisesRegex(
            ValueError, 'The length of profit and weight must be same.'
        )


if __name__ == "__main__":
    unittest.main()
4
1
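# The tests above exercise an external `knapsack.greedy_knapsack` module. This is
# a minimal sketch of the greedy (fractional) strategy they assume, sorting by
# profit-per-unit-weight and splitting the last item; it is an illustration, not
# that module's actual source.
def calc_profit(profit, weight, max_weight):
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        take = min(w, capacity)        # take the whole item, or whatever still fits
        total += p * (take / w)
        capacity -= take
    return total

# all six items fit (total weight 42 <= 100), so the optimum is the full profit
assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210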
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=99 , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : int=16 , UpperCamelCase_ : str=7 , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Dict=32 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : str=30 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Optional[Any]=None , ) -> Union[str, Any]: '''simple docstring''' _lowercase : int = parent _lowercase : Tuple = batch_size _lowercase : Dict = decoder_seq_length # For common tests _lowercase : Optional[Any] = self.decoder_seq_length _lowercase : str = is_training _lowercase : Optional[Any] = use_attention_mask _lowercase : int = use_labels _lowercase : Dict = vocab_size _lowercase : str = d_model _lowercase : Any = d_model _lowercase : Dict = decoder_layers _lowercase : Dict = decoder_layers _lowercase : Optional[int] = decoder_ffn_dim _lowercase : str = decoder_attention_heads _lowercase : Union[str, Any] = decoder_attention_heads _lowercase : str = eos_token_id _lowercase : Dict = bos_token_id _lowercase : Dict = pad_token_id _lowercase : Tuple = decoder_start_token_id _lowercase : Optional[Any] = use_cache _lowercase : int = max_position_embeddings _lowercase : List[str] = None _lowercase : str = decoder_seq_length _lowercase : List[str] = 2 _lowercase : str = 1 def __UpperCAmelCase ( self : Tuple ) -> List[Any]: '''simple docstring''' _lowercase : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowercase : Optional[Any] = None if self.use_attention_mask: _lowercase : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowercase : Optional[Any] = None if self.use_labels: _lowercase : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowercase : Union[str, Any] = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , ) -> Optional[int]: '''simple docstring''' _lowercase : List[str] = True _lowercase : Union[str, Any] = TrOCRDecoder(config=UpperCamelCase_ ).to(UpperCamelCase_ ).eval() _lowercase : int = input_ids[:2] 
input_ids[input_ids == 0] += 1 # first forward pass _lowercase : Optional[int] = model(UpperCamelCase_ , use_cache=UpperCamelCase_ ) _lowercase : Optional[int] = model(UpperCamelCase_ ) _lowercase : Optional[int] = model(UpperCamelCase_ , use_cache=UpperCamelCase_ ) self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) ) self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) + 1 ) _lowercase : List[Any] = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids _lowercase : int = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and _lowercase : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowercase : int = model(UpperCamelCase_ )['last_hidden_state'] _lowercase : Dict = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ )['last_hidden_state'] # select random slice _lowercase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowercase : Union[str, Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowercase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) def __UpperCAmelCase ( self : Dict ) -> Optional[int]: '''simple docstring''' _lowercase : List[Any] = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = config_and_inputs _lowercase : int = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( A , A , A , unittest.TestCase ): '''simple docstring''' A_ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () A_ = (TrOCRForCausalLM,) if is_torch_available() else () A_ = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} A_ = True A_ = False def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]: '''simple docstring''' _lowercase : Tuple = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase_ ) _lowercase : int = ConfigTester(self , config_class=UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' pass def __UpperCAmelCase ( self : Tuple ) -> str: '''simple docstring''' pass def __UpperCAmelCase ( self : List[Any] ) -> Tuple: '''simple docstring''' pass def __UpperCAmelCase ( self : Union[str, Any] ) -> str: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self : Tuple ) -> int: '''simple docstring''' _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase_ ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' return @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def __UpperCAmelCase ( self : int ) -> List[Any]: '''simple docstring''' pass
4
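# An end-to-end usage sketch for the decoder under test, paired with its vision
# encoder via the public TrOCR checkpoint. The image path is a placeholder;
# generate() internally reuses the cached past_key_values exercised above.
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

image = Image.open("handwritten_line.png").convert("RGB")  # placeholder image file
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(text)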
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Tuple =['''XLNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =['''XLNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =[ '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLNetForMultipleChoice''', '''XLNetForQuestionAnswering''', '''XLNetForQuestionAnsweringSimple''', '''XLNetForSequenceClassification''', '''XLNetForTokenClassification''', '''XLNetLMHeadModel''', '''XLNetModel''', '''XLNetPreTrainedModel''', '''load_tf_weights_in_xlnet''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =[ '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLNetForMultipleChoice''', '''TFXLNetForQuestionAnsweringSimple''', '''TFXLNetForSequenceClassification''', '''TFXLNetForTokenClassification''', '''TFXLNetLMHeadModel''', '''TFXLNetMainLayer''', '''TFXLNetModel''', '''TFXLNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
1
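# The import structure above feeds `_LazyModule`, which defers heavy imports until
# an attribute is first touched. Below is a minimal sketch of the underlying
# pattern (PEP 562 module-level __getattr__), illustrating the idea rather than
# the transformers implementation.
import importlib

_import_structure = {"tokenization_xlnet": ["XLNetTokenizer"]}
_class_to_module = {
    cls: mod for mod, classes in _import_structure.items() for cls in classes
}

def __getattr__(name):
    if name in _class_to_module:
        module = importlib.import_module("." + _class_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")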
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _A : Any =re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class lowerCamelCase__ : '''simple docstring''' A_ = 42 A_ = None A_ = None A_ = None A_ = None def __UpperCAmelCase ( self : Optional[Any] ) -> Dict: '''simple docstring''' _lowercase , _lowercase , _lowercase : Optional[int] = _str_to_version_tuple(self.version_str ) def __repr__( self : Optional[int] ) -> int: '''simple docstring''' return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}''' @property def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' return self.major, self.minor, self.patch def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> List[Any]: '''simple docstring''' if isinstance(UpperCamelCase_ , UpperCamelCase_ ): return Version(UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): return other raise TypeError(F'''{other} (type {type(UpperCamelCase_ )}) cannot be compared to version.''' ) def __eq__( self : int , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' try: _lowercase : List[str] = self._validate_operand(UpperCamelCase_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : int , UpperCamelCase_ : List[Any] ) -> Tuple: '''simple docstring''' _lowercase : List[Any] = self._validate_operand(UpperCamelCase_ ) return self.tuple < other.tuple def __hash__( self : List[Any] ) -> List[str]: '''simple docstring''' return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def __UpperCAmelCase ( cls : Any , UpperCamelCase_ : List[str] ) -> List[Any]: '''simple docstring''' _lowercase : Dict = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def __UpperCAmelCase ( self : Dict ) -> str: '''simple docstring''' return self.version_str def __UpperCamelCase ( _lowercase ) -> Any: _lowercase : int = _VERSION_REG.match(_lowercase ) if not res: raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' ) return tuple(int(_lowercase ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def __UpperCamelCase ( _lowercase ) -> int: return ".".join(str(_lowercase ) for v in version_tuple )
4
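# A behavioural sketch of what the Version class above is meant to do: parse
# "x.y.z" into an integer tuple and compare element-wise, so versions order
# numerically rather than lexicographically.
import re

_VERSION_REG = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def parse(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z.")
    return tuple(int(res.group(g)) for g in ("major", "minor", "patch"))

assert parse("1.10.0") > parse("1.2.3")   # tuple comparison: (1, 10, 0) > (1, 2, 3)
assert "1.10.0" < "1.2.3"                 # plain string comparison gets this wrong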
1
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _A : List[Any] =logging.get_logger(__name__) _A : Any =OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', '''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), ('''videomae''', 
'''VideoMAEImageProcessor'''), ('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) _A : List[Any] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def __UpperCamelCase ( _lowercase ) -> str: for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: _lowercase : Optional[Any] = model_type_to_module_name(_lowercase ) _lowercase : Tuple = importlib.import_module(f'''.{module_name}''', 'transformers.models' ) try: return getattr(_lowercase, _lowercase ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(_lowercase, '__name__', _lowercase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _lowercase : Optional[Any] = importlib.import_module('transformers' ) if hasattr(_lowercase, _lowercase ): return getattr(_lowercase, _lowercase ) return None def __UpperCamelCase ( _lowercase, _lowercase = None, _lowercase = False, _lowercase = False, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = False, **_lowercase, ) -> List[Any]: _lowercase : List[Any] = get_file_from_repo( _lowercase, _lowercase, cache_dir=_lowercase, force_download=_lowercase, resume_download=_lowercase, proxies=_lowercase, use_auth_token=_lowercase, revision=_lowercase, local_files_only=_lowercase, ) if resolved_config_file is None: logger.info( 'Could not locate the image processor configuration file, will try to use the model config instead.' ) return {} with open(_lowercase, encoding='utf-8' ) as reader: return json.load(_lowercase ) class lowerCamelCase__ : '''simple docstring''' def __init__( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' raise EnvironmentError( 'AutoImageProcessor is designed to be instantiated ' 'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' ) @classmethod @replace_list_option_in_docstrings(UpperCamelCase_ ) def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : str , **UpperCamelCase_ : List[Any] ) -> List[str]: '''simple docstring''' _lowercase : Any = kwargs.pop('config' , UpperCamelCase_ ) _lowercase : Any = kwargs.pop('trust_remote_code' , UpperCamelCase_ ) _lowercase : Dict = True _lowercase , _lowercase : int = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Any = config_dict.get('image_processor_type' , UpperCamelCase_ ) _lowercase : List[str] = None if "AutoImageProcessor" in config_dict.get('auto_map' , {} ): _lowercase : Tuple = config_dict['auto_map']['AutoImageProcessor'] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: _lowercase : Tuple = config_dict.pop('feature_extractor_type' , UpperCamelCase_ ) if feature_extractor_class is not None: logger.warning( 'Could not find image processor class in the image processor config or the model config. Loading' ' based on pattern matching with the model\'s feature extractor configuration.' 
) _lowercase : List[str] = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' ) if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ): _lowercase : Tuple = config_dict['auto_map']['AutoFeatureExtractor'] _lowercase : Dict = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' ) logger.warning( 'Could not find image processor auto map in the image processor config or the model config.' ' Loading based on pattern matching with the model\'s feature extractor configuration.' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : str = AutoConfig.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) # It could be in `config.image_processor_type`` _lowercase : str = getattr(UpperCamelCase_ , 'image_processor_type' , UpperCamelCase_ ) if hasattr(UpperCamelCase_ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map: _lowercase : Tuple = config.auto_map['AutoImageProcessor'] if image_processor_class is not None: _lowercase : List[str] = image_processor_class_from_name(UpperCamelCase_ ) _lowercase : List[Any] = image_processor_auto_map is not None _lowercase : Union[str, Any] = image_processor_class is not None or type(UpperCamelCase_ ) in IMAGE_PROCESSOR_MAPPING _lowercase : Tuple = resolve_trust_remote_code( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if has_remote_code and trust_remote_code: _lowercase : Dict = get_class_from_dynamic_module( UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Tuple = kwargs.pop('code_revision' , UpperCamelCase_ ) if os.path.isdir(UpperCamelCase_ ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) elif image_processor_class is not None: return image_processor_class.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(UpperCamelCase_ ) in IMAGE_PROCESSOR_MAPPING: _lowercase : Union[str, Any] = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase_ )] return image_processor_class.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) raise ValueError( F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def __UpperCAmelCase ( UpperCamelCase_ : Dict , UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase_ , UpperCamelCase_ )
4
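# Typical entry point for the mapping above: from_pretrained reads
# `image_processor_type` (falling back to the model config or a legacy feature
# extractor config, as implemented above) and returns the matching concrete
# class. Fetching the checkpoint config requires network access.
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(processor).__name__)  # ViTImageProcessor, per the ('vit', ...) entry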
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : Tuple = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: _lowercase : Tuple = 4 _lowercase : Union[str, Any] = 48 _lowercase : Any = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : Dict = [6, 6, 6, 6] _lowercase : Optional[int] = 60 _lowercase : List[str] = [6, 6, 6, 6] _lowercase : Dict = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : str = 4 _lowercase : str = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: _lowercase : str = 1 _lowercase : Tuple = 1 _lowercase : Dict = 126 _lowercase : Optional[int] = 7 _lowercase : List[Any] = 2_5_5.0 _lowercase : Tuple = '' return config def __UpperCamelCase ( _lowercase, _lowercase ) -> str: if "patch_embed.proj" in name and "layers" not in name: _lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: _lowercase : Tuple = name.replace('layers', 'encoder.stages' ) if "residual_group.blocks" in name: _lowercase : str = name.replace('residual_group.blocks', 'layers' ) if "attn.proj" in name: _lowercase : str = name.replace('attn.proj', 'attention.output.dense' ) if "attn" in name: _lowercase : List[Any] = name.replace('attn', 'attention.self' ) if "norm1" in name: _lowercase : List[str] = name.replace('norm1', 'layernorm_before' ) if "norm2" in name: _lowercase : Tuple = name.replace('norm2', 'layernorm_after' ) if "mlp.fc1" in name: _lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' ) if "mlp.fc2" in name: _lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' ) if "q_bias" in name: _lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' ) if "k_bias" in name: _lowercase : str = name.replace('k_bias', 'key.bias' ) if "v_bias" in name: _lowercase : int = name.replace('v_bias', 'value.bias' ) if "cpb_mlp" in name: _lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' ) if name == "norm.weight": _lowercase : Union[str, Any] = 'layernorm.weight' if name == "norm.bias": _lowercase : List[Any] = 'layernorm.bias' if "conv_first" in name: _lowercase : Tuple = name.replace('conv_first', 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: _lowercase : List[str] = name.replace('conv_last', 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: _lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' ) if "upsample.0" in name: _lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' ) if "upsample.2" in name: _lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' ) 
_lowercase : Optional[int] = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": _lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' ) _lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' ) else: pass else: _lowercase : Tuple = 'swin2sr.' + name return name def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]: for key in orig_state_dict.copy().keys(): _lowercase : int = orig_state_dict.pop(_lowercase ) if "qkv" in key: _lowercase : Tuple = key.split('.' ) _lowercase : Optional[Any] = int(key_split[1] ) _lowercase : Any = int(key_split[4] ) _lowercase : Optional[Any] = config.embed_dim if "weight" in key: _lowercase : Optional[int] = val[:dim, :] _lowercase : int = val[dim : dim * 2, :] _lowercase : int = val[-dim:, :] else: _lowercase : Optional[Any] = val[:dim] _lowercase : Tuple = val[dim : dim * 2] _lowercase : List[str] = val[-dim:] pass else: _lowercase : List[Any] = val return orig_state_dict def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]: _lowercase : Optional[Any] = get_config(_lowercase ) _lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase ) model.eval() _lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' ) _lowercase : Any = convert_state_dict(_lowercase, _lowercase ) _lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase ) if len(_lowercase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_lowercase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'''Unexpected key {key} in state_dict''' ) # verify values _lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' _lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' ) _lowercase : Tuple = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values _lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256 _lowercase : List[str] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) _lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 ) if config.num_channels == 1: _lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) _lowercase : Optional[int] = model(_lowercase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 512, 512] ) _lowercase : Tuple = torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] ) _lowercase : int = torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here _lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] ) _lowercase : Dict = torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : List[str] = torch.Size([1, 3, 512, 512] ) _lowercase : int = torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], 
[-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 1024, 1024] ) _lowercase : Union[str, Any] = torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 ) print('Looks ok!' ) _lowercase : List[str] = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } _lowercase : int = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowercase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_lowercase ) if push_to_hub: model.push_to_hub(f'''caidas/{model_name}''' ) processor.push_to_hub(f'''caidas/{model_name}''' ) if __name__ == "__main__": _A : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') _A : int =parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
1
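# A small sketch of the qkv-splitting step in convert_state_dict above: timm
# checkpoints store the attention projection as one fused (3*dim, dim) matrix,
# which is sliced into separate query/key/value tensors. dim=4 is a toy value.
import torch

dim = 4
fused_qkv = torch.randn(3 * dim, dim)  # rows [0, dim) = q, [dim, 2*dim) = k, rest = v
query = fused_qkv[:dim, :]
key = fused_qkv[dim : dim * 2, :]
value = fused_qkv[-dim:, :]
assert torch.equal(torch.cat([query, key, value], dim=0), fused_qkv)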
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _A : int ={ '''configuration_xlm_roberta''': [ '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaConfig''', '''XLMRobertaOnnxConfig''', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : List[str] =['''XLMRobertaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Optional[Any] =['''XLMRobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =[ '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaForCausalLM''', '''XLMRobertaForMaskedLM''', '''XLMRobertaForMultipleChoice''', '''XLMRobertaForQuestionAnswering''', '''XLMRobertaForSequenceClassification''', '''XLMRobertaForTokenClassification''', '''XLMRobertaModel''', '''XLMRobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =[ '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMRobertaForCausalLM''', '''TFXLMRobertaForMaskedLM''', '''TFXLMRobertaForMultipleChoice''', '''TFXLMRobertaForQuestionAnswering''', '''TFXLMRobertaForSequenceClassification''', '''TFXLMRobertaForTokenClassification''', '''TFXLMRobertaModel''', '''TFXLMRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =[ '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxXLMRobertaForMaskedLM''', '''FlaxXLMRobertaForCausalLM''', '''FlaxXLMRobertaForMultipleChoice''', '''FlaxXLMRobertaForQuestionAnswering''', '''FlaxXLMRobertaForSequenceClassification''', '''FlaxXLMRobertaForTokenClassification''', '''FlaxXLMRobertaModel''', '''FlaxXLMRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, 
TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys _A : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    '''simple docstring'''
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
4
1
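# A worked example for text_justification above (LeetCode 68 semantics): greedy
# line packing, extra spaces handed out round-robin from the left, and a
# left-justified, space-padded last line.
from text_justification import text_justification  # assuming the file above is saved under this name

assert text_justification("This is an example of text justification.", 16) == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]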
'''simple docstring''' import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters _A : int =logging.get_logger(__name__) def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase=None, _lowercase=None ) -> int: # Recurse if needed if "." in tensor_name: _lowercase : List[str] = tensor_name.split('.' ) for split in splits[:-1]: _lowercase : Optional[int] = getattr(_lowercase, _lowercase ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) _lowercase : int = new_module _lowercase : List[str] = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) _lowercase : Union[str, Any] = tensor_name in module._buffers _lowercase : str = getattr(_lowercase, _lowercase ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) _lowercase : Optional[Any] = False _lowercase : Union[str, Any] = False if is_buffer or not is_bitsandbytes_available(): _lowercase : Optional[Any] = False _lowercase : List[Any] = False else: _lowercase : Optional[Any] = hasattr(bnb.nn, 'Params4bit' ) and isinstance(module._parameters[tensor_name], bnb.nn.Paramsabit ) _lowercase : Union[str, Any] = isinstance(module._parameters[tensor_name], bnb.nn.IntaParams ) if is_abit or is_abit: _lowercase : Any = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: _lowercase : Union[str, Any] = old_value.to(_lowercase ) elif isinstance(_lowercase, torch.Tensor ): _lowercase : Dict = value.to('cpu' ) if value.dtype == torch.inta: _lowercase : List[str] = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: _lowercase : Union[str, Any] = torch.tensor(_lowercase, device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls, _lowercase ) and fpaa_statistics is None: _lowercase : Tuple = new_value.T _lowercase : List[str] = old_value.__dict__ if is_abit: _lowercase : List[Any] = bnb.nn.IntaParams(_lowercase, requires_grad=_lowercase, **_lowercase ).to(_lowercase ) elif is_abit: _lowercase : int = bnb.nn.Paramsabit(_lowercase, requires_grad=_lowercase, **_lowercase ).to(_lowercase ) _lowercase : int = new_value if fpaa_statistics is not None: setattr(module.weight, 'SCB', fpaa_statistics.to(_lowercase ) ) else: if value is None: _lowercase : Optional[int] = old_value.to(_lowercase ) elif isinstance(_lowercase, torch.Tensor ): _lowercase : int = value.to(_lowercase ) else: _lowercase : List[Any] = torch.tensor(_lowercase, device=_lowercase ) if is_buffer: _lowercase : int = new_value else: _lowercase : int = nn.Parameter(_lowercase, requires_grad=old_value.requires_grad ) _lowercase : Tuple = new_value def __UpperCamelCase ( _lowercase, _lowercase=None, _lowercase=None, _lowercase=None, _lowercase=False ) -> Union[str, Any]: for name, module in model.named_children(): if current_key_name is None: _lowercase : Dict = [] current_key_name.append(_lowercase ) if (isinstance(_lowercase, nn.Linear ) or isinstance(_lowercase, _lowercase )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(_lowercase ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(_lowercase, _lowercase ): _lowercase , _lowercase : Optional[int] = module.weight.shape else: _lowercase : Any = module.in_features _lowercase : str = module.out_features if quantization_config.quantization_method() == "llm_int8": _lowercase : int = bnb.nn.LinearabitLt( _lowercase, _lowercase, module.bias is not None, has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight, threshold=quantization_config.llm_inta_threshold, ) _lowercase : List[str] = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: _lowercase : int = bnb.nn.Linearabit( _lowercase, _lowercase, module.bias is not None, quantization_config.bnb_abit_compute_dtype, compress_statistics=quantization_config.bnb_abit_use_double_quant, quant_type=quantization_config.bnb_abit_quant_type, ) _lowercase : Dict = True # Store the module class in case we need to transpose the weight later _lowercase : str = type(_lowercase ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(_lowercase ) if len(list(module.children() ) ) > 0: _lowercase , _lowercase : str = _replace_with_bnb_linear( _lowercase, _lowercase, _lowercase, _lowercase, has_been_replaced=_lowercase, ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __UpperCamelCase ( _lowercase, _lowercase=None, _lowercase=None, _lowercase=None ) -> Optional[int]: _lowercase : Optional[Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert _lowercase , _lowercase : List[Any] = _replace_with_bnb_linear( _lowercase, _lowercase, _lowercase, _lowercase ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' 
) return model def __UpperCamelCase ( *_lowercase, **_lowercase ) -> List[str]: warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead', _lowercase, ) return replace_with_bnb_linear(*_lowercase, **_lowercase ) def __UpperCamelCase ( *_lowercase, **_lowercase ) -> Dict: warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead', _lowercase, ) return set_module_quantized_tensor_to_device(*_lowercase, **_lowercase ) def __UpperCamelCase ( _lowercase ) -> int: _lowercase : List[Any] = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() _lowercase : int = find_tied_parameters(_lowercase ) # For compatibility with Accelerate < 0.18 if isinstance(_lowercase, _lowercase ): _lowercase : Dict = sum(list(tied_params.values() ), [] ) + list(tied_params.keys() ) else: _lowercase : List[Any] = sum(_lowercase, [] ) _lowercase : Any = len(_lowercase ) > 0 # Check if it is a base model _lowercase : str = not hasattr(_lowercase, model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head _lowercase : int = list(model.named_children() ) _lowercase : List[str] = [list_modules[-1][0]] # add last module together with tied weights _lowercase : Optional[Any] = set(_lowercase ) - set(_lowercase ) _lowercase : Optional[Any] = list(set(_lowercase ) ) + list(_lowercase ) # remove ".weight" from the keys _lowercase : Dict = ['.weight', '.bias'] _lowercase : Dict = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: _lowercase : int = name.replace(_lowercase, '' ) filtered_module_names.append(_lowercase ) return filtered_module_names
4
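The utilities above walk a model's module tree and swap every `nn.Linear` for a bitsandbytes quantized layer, skipping names in `modules_to_not_convert` (e.g. the LM head). A minimal sketch of that recursive replacement pattern, assuming only PyTorch is installed; `QuantStub` is an illustrative stand-in, not the real `bnb.nn.Linear8bitLt` / `bnb.nn.Linear4bit`:

import torch.nn as nn


class QuantStub(nn.Linear):
    """Stand-in for a quantized linear layer (e.g. bnb.nn.Linear8bitLt)."""


def replace_linears(model: nn.Module, skip=("lm_head",)) -> nn.Module:
    for name, child in model.named_children():
        if isinstance(child, nn.Linear) and name not in skip:
            # Swap the layer in place, preserving its shape and bias flag.
            model._modules[name] = QuantStub(child.in_features, child.out_features, child.bias is not None)
        else:
            replace_linears(child, skip)  # recurse into submodules
    return model


tiny = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
print(replace_linears(tiny))  # both Linear layers are now QuantStub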
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune the scripts folder and hidden/underscore directories in place
        # so os.walk does not descend into them.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * ' '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
4
1
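A quick illustrative check of the prefix helper above (names follow the reconstruction): depth 0 opens a new `##` section heading, while deeper entries become bullets indented one space per level.

assert md_prefix(0) == "\n##"
assert md_prefix(2) == "  *"  # two levels -> two leading spaces, then "*"
# Running print_directory_md(".") over a repo containing a hypothetical
# sorts/bubble_sort.py would print something like:
#
# ## Sorts
#  * [Bubble Sort](sorts/bubble_sort.py)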
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _A : Tuple =logging.get_logger(__name__) def __UpperCamelCase ( _lowercase ) -> int: _lowercase : Optional[Any] = 'huggingface/label-files' _lowercase : Tuple = 'imagenet-1k-id2label.json' _lowercase : int = json.load(open(hf_hub_download(_lowercase, _lowercase, repo_type='dataset' ), 'r' ) ) _lowercase : Dict = {int(_lowercase ): v for k, v in idalabel.items()} _lowercase : int = {v: k for k, v in idalabel.items()} _lowercase : str = 'std_conv' if 'bit' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" _lowercase : Dict = BitConfig( conv_layer=_lowercase, num_labels=1000, idalabel=_lowercase, labelaid=_lowercase, ) return config def __UpperCamelCase ( _lowercase ) -> Tuple: if "stem.conv" in name: _lowercase : Optional[Any] = name.replace('stem.conv', 'bit.embedder.convolution' ) if "blocks" in name: _lowercase : int = name.replace('blocks', 'layers' ) if "head.fc" in name: _lowercase : Dict = name.replace('head.fc', 'classifier.1' ) if name.startswith('norm' ): _lowercase : Union[str, Any] = 'bit.' + name if "bit" not in name and "classifier" not in name: _lowercase : int = 'bit.encoder.' 
+ name return name def __UpperCamelCase ( ) -> List[str]: _lowercase : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' _lowercase : Optional[int] = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _lowercase, _lowercase, _lowercase=False ) -> List[Any]: _lowercase : str = get_config(_lowercase ) # load original model from timm _lowercase : List[str] = create_model(_lowercase, pretrained=_lowercase ) timm_model.eval() # load state_dict of original model _lowercase : Union[str, Any] = timm_model.state_dict() for key in state_dict.copy().keys(): _lowercase : Union[str, Any] = state_dict.pop(_lowercase ) _lowercase : Union[str, Any] = val.squeeze() if 'head' in key else val # load HuggingFace model _lowercase : Any = BitForImageClassification(_lowercase ) model.eval() model.load_state_dict(_lowercase ) # create image processor _lowercase : int = create_transform(**resolve_data_config({}, model=_lowercase ) ) _lowercase : Optional[Any] = transform.transforms _lowercase : Optional[int] = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } _lowercase : Union[str, Any] = BitImageProcessor( do_resize=_lowercase, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=_lowercase, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=_lowercase, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), ) _lowercase : Any = prepare_img() _lowercase : str = transform(_lowercase ).unsqueeze(0 ) _lowercase : Union[str, Any] = processor(_lowercase, return_tensors='pt' ).pixel_values # verify pixel values assert torch.allclose(_lowercase, _lowercase ) # verify logits with torch.no_grad(): _lowercase : List[Any] = model(_lowercase ) _lowercase : Union[str, Any] = outputs.logits print('Logits:', logits[0, :3] ) print('Predicted class:', model.config.idalabel[logits.argmax(-1 ).item()] ) _lowercase : str = timm_model(_lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowercase, outputs.logits, atol=1E-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowercase ) processor.save_pretrained(_lowercase ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": _A : Any =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''resnetv2_50x1_bitm''', type=str, help='''Name of the BiT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub.''', ) _A : str =parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
4
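The conversion script's second helper maps timm parameter names onto the HF BiT layout. A self-contained restatement of those rename rules, with hypothetical sample keys:

def rename_key(name: str) -> str:
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


print(rename_key("stem.conv.weight"))  # -> bit.embedder.convolution.weight
print(rename_key("blocks.0.conv1"))    # -> bit.encoder.layers.0.conv1
print(rename_key("head.fc.bias"))      # -> classifier.1.bias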
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
4
1
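The `__init__` above defers all heavy imports to `_LazyModule`: attribute access triggers the real import, so importing the package stays cheap. A hedged stdlib-only sketch of that lazy-import idea; `DemoLazy` is illustrative and much simpler than the real `_LazyModule`:

import importlib
import types


class DemoLazy(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # attribute name -> module that really defines it
        self._map = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # Import on first access; unknown attributes raise KeyError here.
        module = importlib.import_module(self._map[attr])
        return getattr(module, attr)


lazy = DemoLazy("demo", {"json": ["loads", "dumps"]})
print(lazy.loads("[1, 2, 3]"))  # json is only imported on first access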
'''simple docstring''' import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : int ) -> Any: '''simple docstring''' _lowercase : List[str] = torch.nn.Linear(10 , 10 ) _lowercase : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) _lowercase : str = Accelerator() _lowercase : str = accelerator.prepare(UpperCamelCase_ ) try: pickle.loads(pickle.dumps(UpperCamelCase_ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
4
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple: '''simple docstring''' _lowercase : int = parent _lowercase : str = batch_size _lowercase : List[str] = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_attention_mask _lowercase : List[Any] = use_token_type_ids _lowercase : Union[str, Any] = use_labels _lowercase : Dict = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : int = num_attention_heads _lowercase : Optional[int] = intermediate_size _lowercase : Any = hidden_act _lowercase : List[str] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Optional[int] = max_position_embeddings _lowercase : int = type_vocab_size _lowercase : Any = type_sequence_label_size _lowercase : Any = initializer_range _lowercase : str = num_choices def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : int = None if self.use_attention_mask: _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Any = None if self.use_token_type_ids: _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' _lowercase : Dict = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs _lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax 
class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' A_ = True A_ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Tuple = FlaxRoFormerModelTester(self ) @slow def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: _lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ ) _lowercase : str = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase_ ) @require_flax class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : List[str] ) -> List[Any]: '''simple docstring''' _lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) _lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] ) _lowercase : int = model(UpperCamelCase_ )[0] _lowercase : Union[str, Any] = 5_0000 _lowercase : str = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCamelCase_ ) _lowercase : int = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
4
1
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __UpperCamelCase ( _lowercase ) -> Union[str, Any]: return x + 2 class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : str ) -> Union[str, Any]: '''simple docstring''' _lowercase : int = 'x = 3' _lowercase : str = {} _lowercase : Union[str, Any] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ ) assert result == 3 self.assertDictEqual(UpperCamelCase_ , {'x': 3} ) _lowercase : str = 'x = y' _lowercase : int = {'y': 5} _lowercase : Optional[int] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(UpperCamelCase_ , {'x': 5, 'y': 5} ) def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _lowercase : str = 'y = add_two(x)' _lowercase : List[Any] = {'x': 3} _lowercase : int = evaluate(UpperCamelCase_ , {'add_two': add_two} , state=UpperCamelCase_ ) assert result == 5 self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'y': 5} ) # Won't work without the tool with CaptureStdout() as out: _lowercase : Dict = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ ) assert result is None assert "tried to execute add_two" in out.out def __UpperCAmelCase ( self : str ) -> Any: '''simple docstring''' _lowercase : Dict = 'x = 3' _lowercase : Any = {} _lowercase : List[Any] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ ) assert result == 3 self.assertDictEqual(UpperCamelCase_ , {'x': 3} ) def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]: '''simple docstring''' _lowercase : List[str] = 'test_dict = {\'x\': x, \'y\': add_two(x)}' _lowercase : Optional[int] = {'x': 3} _lowercase : List[str] = evaluate(UpperCamelCase_ , {'add_two': add_two} , state=UpperCamelCase_ ) self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'y': 5} ) self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} ) def __UpperCAmelCase ( self : Optional[int] ) -> str: '''simple docstring''' _lowercase : Optional[Any] = 'x = 3\ny = 5' _lowercase : Any = {} _lowercase : int = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'y': 5} ) def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' _lowercase : List[str] = 'text = f\'This is x: {x}.\'' _lowercase : str = {'x': 3} _lowercase : Dict = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'text': 'This is x: 3.'} ) def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' _lowercase : Tuple = 'if x <= 3:\n y = 2\nelse:\n y = 5' _lowercase : Tuple = {'x': 3} _lowercase : Optional[Any] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'y': 2} ) _lowercase : Any = {'x': 8} _lowercase : List[Any] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(UpperCamelCase_ , {'x': 8, 'y': 5} ) def __UpperCAmelCase ( self : str ) -> Optional[Any]: '''simple docstring''' _lowercase : Tuple = 'test_list = [x, add_two(x)]' _lowercase : Tuple = {'x': 3} _lowercase : Union[str, Any] = evaluate(UpperCamelCase_ , {'add_two': add_two} , state=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , [3, 5] ) self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'test_list': [3, 5]} ) def __UpperCAmelCase ( self : List[Any] ) -> Dict: '''simple docstring''' _lowercase : Tuple = 'y = x' _lowercase : Dict = {'x': 3} _lowercase : Optional[Any] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ ) assert result == 3 self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'y': 3} ) def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' _lowercase : List[str] = 'test_list = [x, add_two(x)]\ntest_list[1]' _lowercase : int = {'x': 3} _lowercase : Any = evaluate(UpperCamelCase_ , {'add_two': add_two} , state=UpperCamelCase_ ) assert result == 5 self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'test_list': [3, 5]} ) _lowercase : int = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']' _lowercase : Optional[Any] = {'x': 3} _lowercase : Dict = evaluate(UpperCamelCase_ , {'add_two': add_two} , state=UpperCamelCase_ ) assert result == 5 self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} ) def __UpperCAmelCase ( self : Any ) -> str: '''simple docstring''' _lowercase : List[str] = 'x = 0\nfor i in range(3):\n x = i' _lowercase : int = {} _lowercase : Dict = evaluate(UpperCamelCase_ , {'range': range} , state=UpperCamelCase_ ) assert result == 2 self.assertDictEqual(UpperCamelCase_ , {'x': 2, 'i': 2} )
4
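The tests above pin down the semantics of transformers' restricted interpreter: assignments update `state`, only whitelisted tools are callable, and the value of the last statement is returned. A minimal sketch of the same idea under those assumptions (simple `Name` targets only, no real sandboxing guarantees):

import ast


def tiny_evaluate(code: str, tools: dict, state: dict):
    """Evaluate `code` with no builtins; only `tools` and `state` are visible."""
    result = None
    scope = {"__builtins__": {}}
    for node in ast.parse(code).body:
        if isinstance(node, ast.Assign):  # e.g. "y = add_two(x)"
            value = eval(ast.unparse(node.value), scope, {**tools, **state})
            for target in node.targets:  # simple names only
                state[target.id] = value
            result = value
        elif isinstance(node, ast.Expr):  # bare expression
            result = eval(ast.unparse(node.value), scope, {**tools, **state})
    return result


state = {}
print(tiny_evaluate("x = 3\ny = x + 2", {}, state))  # 5; state == {'x': 3, 'y': 5}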
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _A : Optional[int] =logging.get_logger(__name__) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = ["""input_features""", """is_longer"""] def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict: '''simple docstring''' super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : Tuple = top_db _lowercase : Any = truncation _lowercase : str = padding _lowercase : int = fft_window_size _lowercase : Any = (fft_window_size >> 1) + 1 _lowercase : int = hop_length _lowercase : Any = max_length_s _lowercase : str = max_length_s * sampling_rate _lowercase : Any = sampling_rate _lowercase : List[Any] = frequency_min _lowercase : Tuple = frequency_max _lowercase : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , ) _lowercase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , ) def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]: '''simple docstring''' _lowercase : Tuple = copy.deepcopy(self.__dict__ ) _lowercase : int = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray: '''simple docstring''' _lowercase : List[str] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , ) return log_mel_spectrogram.T def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk _lowercase : int = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk _lowercase : Union[str, Any] = [0] # randomly choose index for each part _lowercase : Tuple = np.random.choice(ranges[0] ) _lowercase : int = np.random.choice(ranges[1] ) _lowercase : Any = np.random.choice(ranges[2] ) _lowercase : int = mel[idx_front : idx_front + chunk_frames, :] _lowercase : int = 
mel[idx_middle : idx_middle + chunk_frames, :] _lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :] _lowercase : List[Any] = torch.tensor(mel[None, None, :] ) _lowercase : Optional[int] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ ) _lowercase : str = mel_shrink[0][0].numpy() _lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array: '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": _lowercase : Tuple = True # random crop to max_length (for compatibility) -> this should be handled by self.pad _lowercase : Any = len(UpperCamelCase_ ) - max_length _lowercase : Dict = np.random.randint(0 , overflow + 1 ) _lowercase : Optional[int] = waveform[idx : idx + max_length] _lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) _lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed _lowercase : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. _lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 ) _lowercase : List[Any] = False else: _lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : int = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: _lowercase : Any = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": _lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) ) _lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": _lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) ) _lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) ) _lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": _lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) _lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature: '''simple docstring''' _lowercase : Dict = truncation if truncation is not None else self.truncation _lowercase : int = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _lowercase : List[str] = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ): _lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa ) elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowercase : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowercase : int = [np.asarray(UpperCamelCase_ )] # convert to mel spectrogram, truncate and pad if needed. 
_lowercase : Optional[Any] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ ) for waveform in raw_speech ] _lowercase : List[Any] = [] _lowercase : Dict = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_ ) is_longer.append(UpperCamelCase_ ) if truncation == "fusion" and sum(UpperCamelCase_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer _lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) ) _lowercase : str = True if isinstance(input_mel[0] , UpperCamelCase_ ): _lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool _lowercase : Tuple = [[longer] for longer in is_longer] _lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer} _lowercase : Optional[int] = BatchFeature(UpperCamelCase_ ) if return_tensors is not None: _lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ ) return input_features
4
1
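One branch worth calling out in the feature extractor above is `repeatpad`: a clip shorter than `max_length` is tiled and then zero-padded to exactly `max_length` samples. A stand-alone numpy illustration of that branch:

import numpy as np


def repeat_pad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = int(max_length / len(waveform))
    waveform = np.tile(waveform, n_repeat)  # repeat the whole clip
    # then zero-pad the remainder up to max_length
    return np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)


wav = np.array([0.1, -0.2, 0.3], dtype=np.float32)
print(repeat_pad(wav, 8))  # [ 0.1 -0.2  0.3  0.1 -0.2  0.3  0.   0. ]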
'''simple docstring''' # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class lowerCamelCase__ ( A , A , A , unittest.TestCase ): '''simple docstring''' A_ = StableDiffusionControlNetImgaImgPipeline A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} ) A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __UpperCAmelCase ( self : str ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) _lowercase : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0 ) _lowercase : List[str] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) _lowercase : Optional[int] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , ) torch.manual_seed(0 ) _lowercase : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) _lowercase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _lowercase : Any = CLIPTextModel(UpperCamelCase_ ) _lowercase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _lowercase : List[Any] = { 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any]=0 ) -> Dict: '''simple docstring''' if str(UpperCamelCase_ ).startswith('mps' ): _lowercase : Any = torch.manual_seed(UpperCamelCase_ ) else: _lowercase : List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) _lowercase : Union[str, Any] = 2 _lowercase : 
Optional[Any] = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase_ , device=torch.device(UpperCamelCase_ ) , ) _lowercase : Optional[Any] = floats_tensor(control_image.shape , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) _lowercase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowercase : str = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('RGB' ).resize((64, 64) ) _lowercase : Optional[int] = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 'control_image': control_image, } return inputs def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self : Optional[Any] ) -> Dict: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __UpperCAmelCase ( self : Dict ) -> Any: '''simple docstring''' self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class lowerCamelCase__ ( A , A , unittest.TestCase ): '''simple docstring''' A_ = StableDiffusionControlNetImgaImgPipeline A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A_ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __UpperCAmelCase ( self : Optional[Any] ) -> Any: '''simple docstring''' torch.manual_seed(0 ) _lowercase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(UpperCamelCase_ : Optional[int] ): if isinstance(UpperCamelCase_ , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) _lowercase : List[str] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(UpperCamelCase_ ) torch.manual_seed(0 ) _lowercase : str = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(UpperCamelCase_ ) torch.manual_seed(0 ) _lowercase : Optional[int] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , ) torch.manual_seed(0 ) _lowercase : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) _lowercase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _lowercase : 
Optional[int] = CLIPTextModel(UpperCamelCase_ ) _lowercase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _lowercase : Tuple = MultiControlNetModel([controlneta, controlneta] ) _lowercase : Any = { 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __UpperCAmelCase ( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple=0 ) -> int: '''simple docstring''' if str(UpperCamelCase_ ).startswith('mps' ): _lowercase : List[Any] = torch.manual_seed(UpperCamelCase_ ) else: _lowercase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) _lowercase : List[Any] = 2 _lowercase : Optional[Any] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase_ , device=torch.device(UpperCamelCase_ ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase_ , device=torch.device(UpperCamelCase_ ) , ), ] _lowercase : int = floats_tensor(control_image[0].shape , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) _lowercase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowercase : Optional[int] = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('RGB' ).resize((64, 64) ) _lowercase : str = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 'control_image': control_image, } return inputs def __UpperCAmelCase ( self : Tuple ) -> str: '''simple docstring''' _lowercase : Dict = self.get_dummy_components() _lowercase : List[str] = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) _lowercase : Tuple = 10.0 _lowercase : List[Any] = 4 _lowercase : Any = self.get_dummy_inputs(UpperCamelCase_ ) _lowercase : Union[str, Any] = steps _lowercase : Optional[int] = scale _lowercase : Optional[int] = pipe(**UpperCamelCase_ )[0] _lowercase : Dict = self.get_dummy_inputs(UpperCamelCase_ ) _lowercase : Tuple = steps _lowercase : Any = scale _lowercase : List[Any] = pipe(**UpperCamelCase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] _lowercase : int = self.get_dummy_inputs(UpperCamelCase_ ) _lowercase : Optional[int] = steps _lowercase : Any = scale _lowercase : Dict = pipe(**UpperCamelCase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] _lowercase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ ) _lowercase : List[Any] = steps _lowercase : List[Any] = scale _lowercase : Optional[Any] = pipe(**UpperCamelCase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def 
__UpperCAmelCase ( self : List[str] ) -> Dict: '''simple docstring''' self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def __UpperCAmelCase ( self : Dict ) -> List[Any]: '''simple docstring''' _lowercase : int = self.get_dummy_components() _lowercase : Any = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(UpperCamelCase_ ) except NotImplementedError: pass @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : str ) -> Optional[Any]: '''simple docstring''' _lowercase : Union[str, Any] = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' ) _lowercase : List[str] = StableDiffusionControlNetImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , safety_checker=UpperCamelCase_ , controlnet=UpperCamelCase_ ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase_ ) _lowercase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowercase : Union[str, Any] = 'evil space-punk bird' _lowercase : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) ) _lowercase : str = load_image( 'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) ) _lowercase : str = pipe( UpperCamelCase_ , UpperCamelCase_ , control_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='np' , num_inference_steps=50 , strength=0.6 , ) _lowercase : Optional[int] = output.images[0] assert image.shape == (512, 512, 3) _lowercase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' ) assert np.abs(expected_image - image ).max() < 9E-2
4
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Fetch the top `max_stories` posts from the Hacker News API."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
4
1
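Usage note for the module above (requires network access; names follow the reconstruction). Each story dict returned by the API is expected to carry `title` and `url` keys, which the markdown builder interpolates; self-posts without a `url` would raise a `KeyError` here.

# >>> print(hackernews_top_stories_as_markdown(2))  # doctest: +SKIP
# * [Some story](https://example.com/a)
# * [Another story](https://example.com/b)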
def one_pence() -> int:
    # Exactly one way to make any non-negative amount with only 1p coins.
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    return two_pound(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
4
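The chain of functions above counts the ways to make `x` pence from coins {1, 2, 5, 10, 20, 50, 100, 200}: each function fixes how many of its own coin are used and delegates the remainder to the next smaller coin. An equivalent (and much faster) bottom-up sketch of the same count:

def count_ways(target: int = 200, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]


assert count_ways(5) == 4  # 5 = 5 = 2+2+1 = 2+1+1+1 = 1+1+1+1+1
print(count_ways(200))     # 73682, the classic Project Euler 31 answer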
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
4
1
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
4
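A hedged usage note for the launcher above (the script names are hypothetical): it mirrors `torch.distributed.launch` for TPUs, importing the training script as a module, rewriting `sys.argv` to append `--tpu_num_cores`, and handing the script's `_mp_fn` to `xmp.spawn`.

# A typical invocation might look like:
#
#   python xla_spawn.py --num_cores 8 \
#       run_glue.py --model_name_or_path bert-base-cased ...
#
# The training script must therefore expose a module-level `_mp_fn(index)`.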
'''simple docstring''' import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def __UpperCamelCase ( _lowercase ) -> List[Any]: _lowercase : Tuple = args.pruning_method _lowercase : int = args.threshold _lowercase : str = args.model_name_or_path.rstrip('/' ) _lowercase : Dict = args.target_model_path print(f'''Load fine-pruned model from {model_name_or_path}''' ) _lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) ) _lowercase : List[Any] = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _lowercase : Optional[int] = tensor print(f'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: _lowercase : List[str] = tensor print(f'''Copied layer {name}''' ) elif "bias" in name: _lowercase : Dict = tensor print(f'''Copied layer {name}''' ) else: if pruning_method == "magnitude": _lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase ) _lowercase : Optional[Any] = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue _lowercase : Optional[Any] = name[:-6] _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores'''] _lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase ) _lowercase : str = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _lowercase : str = name[:-6] _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores'''] _lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase ) _lowercase : Optional[int] = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue _lowercase : Optional[int] = name[:-6] _lowercase : List[str] = model[f'''{prefix_}mask_scores'''] _lowercase , _lowercase : Union[str, Any] = -0.1, 1.1 _lowercase : str = torch.sigmoid(_lowercase ) _lowercase : int = s * (r - l) + l _lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 ) _lowercase : Union[str, Any] = tensor * mask print(f'''Pruned layer {name}''' ) else: raise ValueError('Unknown pruning method' ) if target_model_path is None: _lowercase : List[Any] = os.path.join( os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' ) if not os.path.isdir(_lowercase ): shutil.copytree(_lowercase, _lowercase ) print(f'''\nCreated folder {target_model_path}''' ) torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) ) print('\nPruned model saved! See you later!' 
) if __name__ == "__main__": _A : Union[str, Any] =argparse.ArgumentParser() parser.add_argument( '''--pruning_method''', choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''], type=str, required=True, help=( '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,''' ''' sigmoied_threshold = Soft movement pruning)''' ), ) parser.add_argument( '''--threshold''', type=float, required=False, help=( '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.''' '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.''' '''Not needed for `l0`''' ), ) parser.add_argument( '''--model_name_or_path''', type=str, required=True, help='''Folder containing the model that was previously fine-pruned''', ) parser.add_argument( '''--target_model_path''', default=None, type=str, required=False, help='''Folder containing the model that was previously fine-pruned''', ) _A : List[Any] =parser.parse_args() main(args)
4
1
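Per the CLI help above, `--threshold` for the `magnitude` method is the fraction of weights to keep. A stand-alone torch sketch of that keep-top-|w| masking (the real script delegates this to `MagnitudeBinarizer.apply`):

import torch


def magnitude_prune(tensor: torch.Tensor, keep: float) -> torch.Tensor:
    # Keep the `keep` fraction of entries with the largest magnitude.
    k = max(1, int(keep * tensor.numel()))
    idx = tensor.abs().flatten().topk(k).indices
    mask = torch.zeros(tensor.numel())
    mask[idx] = 1.0
    return tensor * mask.view_as(tensor)


w = torch.tensor([[0.05, -0.9], [0.4, -0.01]])
print(magnitude_prune(w, 0.5))  # keeps -0.9 and 0.4, zeroes the rest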
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the number of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
4
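The script above implements Newton's forward-difference interpolation: f(x) is approximated by summing ucal(u, i) times the i-th forward difference of y at x0, divided by i!, with u = (x - x0) / h. A self-contained check on f(x) = x*x, where second differences are constant and the formula is exact:

import math


def forward_interpolate(xs, ys, x):
    n = len(xs)
    table = [ys[:]]  # row 0 of the forward-difference table
    for i in range(1, n):
        table.append([table[-1][j + 1] - table[-1][j] for j in range(n - i)])
    u = (x - xs[0]) / (xs[1] - xs[0])
    total, u_term = table[0][0], 1.0
    for i in range(1, n):
        u_term *= u - (i - 1)  # accumulates u(u-1)...(u-i+1)
        total += u_term * table[i][0] / math.factorial(i)
    return total


print(forward_interpolate([0, 1, 2, 3], [0, 1, 4, 9], 2.5))  # 6.25 == 2.5**2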
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
4
1
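Round-trip check for the implementation above (names follow the reconstruction); results should agree with the stdlib `base64` module:

import base64 as stdlib_b64

assert base64_encode(b"Hello!") == b"SGVsbG8h"
assert base64_decode(b"SGVsbG8h") == b"Hello!"
assert base64_encode(b"transformers") == stdlib_b64.b64encode(b"transformers")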
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase__ : '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : str=3 , UpperCamelCase_ : Optional[Any]=32 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : List[Any]=10 , UpperCamelCase_ : Dict=[10, 20, 30, 40] , UpperCamelCase_ : Any=[1, 1, 2, 1] , UpperCamelCase_ : Any=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[str]="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : Optional[Any]=None , ) -> int: '''simple docstring''' _lowercase : List[Any] = parent _lowercase : Union[str, Any] = batch_size _lowercase : Union[str, Any] = image_size _lowercase : Optional[int] = num_channels _lowercase : str = embeddings_size _lowercase : Optional[Any] = hidden_sizes _lowercase : List[str] = depths _lowercase : List[str] = is_training _lowercase : Union[str, Any] = use_labels _lowercase : str = hidden_act _lowercase : Dict = num_labels _lowercase : str = scope _lowercase : Any = len(UpperCamelCase_ ) def __UpperCAmelCase ( self : int ) -> List[str]: '''simple docstring''' _lowercase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : Optional[Any] = None if self.use_labels: _lowercase : str = ids_tensor([self.batch_size] , self.num_labels ) _lowercase : Any = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self : Any ) -> Optional[int]: '''simple docstring''' return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _lowercase : Optional[int] = TFResNetModel(config=UpperCamelCase_ ) _lowercase : Tuple = model(UpperCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __UpperCAmelCase ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ) -> Any: '''simple docstring''' _lowercase : Any = self.num_labels _lowercase : Tuple = TFResNetForImageClassification(UpperCamelCase_ ) _lowercase : Optional[Any] = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self : Any ) -> List[Any]: '''simple docstring''' _lowercase : Optional[Any] = self.prepare_config_and_inputs() _lowercase , _lowercase , 
_lowercase : Any = config_and_inputs _lowercase : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): '''simple docstring''' A_ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () A_ = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self : str ) -> List[Any]: '''simple docstring''' _lowercase : Any = TFResNetModelTester(self ) _lowercase : Any = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCAmelCase ( self : Optional[Any] ) -> str: '''simple docstring''' return @unittest.skip(reason='ResNet does not use inputs_embeds' ) def __UpperCAmelCase ( self : Any ) -> Optional[int]: '''simple docstring''' pass @unittest.skip(reason='ResNet does not support input and output embeddings' ) def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' pass def __UpperCAmelCase ( self : Dict ) -> Any: '''simple docstring''' _lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Union[str, Any] = model_class(UpperCamelCase_ ) _lowercase : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : List[str] = [*signature.parameters.keys()] _lowercase : int = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple ) -> List[str]: '''simple docstring''' _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' def check_hidden_states_output(UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str ): _lowercase : Union[str, Any] = model_class(UpperCamelCase_ ) _lowercase : Any = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) _lowercase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowercase : Dict = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : Tuple = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _lowercase : Optional[Any] = layer_type _lowercase : Dict = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] _lowercase : Optional[Any] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict ) -> str: '''simple docstring''' _lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def __UpperCAmelCase ( self : str ) -> Union[str, Any]: '''simple docstring''' for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[Any] = TFResNetModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def __UpperCamelCase ( ) -> int: _lowercase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def __UpperCAmelCase ( self : Optional[int] ) -> int: '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __UpperCAmelCase ( self : int ) -> List[str]: '''simple docstring''' _lowercase : Dict = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _lowercase : Any = self.default_image_processor _lowercase : Tuple = prepare_img() _lowercase : str = image_processor(images=UpperCamelCase_ , return_tensors='tf' ) # forward pass _lowercase : Optional[Any] = model(**UpperCamelCase_ ) # verify the logits _lowercase : Tuple = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) _lowercase : List[str] = tf.constant([-11.10_69, -9.78_77, -8.37_77] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCamelCase_ , atol=1E-4 ) )
4
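Outside the test harness, the TF ResNet classes exercised above can be used directly; a usage sketch assuming the public microsoft/resnet-50 checkpoint (any RGB image works in place of the COCO fixture path):

import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes
predicted = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted])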
'''simple docstring''' def __UpperCamelCase ( _lowercase ) -> bool: return str(_lowercase ) == str(_lowercase )[::-1] def __UpperCamelCase ( _lowercase ) -> int: return int(_lowercase ) + int(str(_lowercase )[::-1] ) def __UpperCamelCase ( _lowercase = 1_0000 ) -> int: _lowercase : List[str] = [] for num in range(1, _lowercase ): _lowercase : Tuple = 0 _lowercase : Tuple = num while iterations < 50: _lowercase : Union[str, Any] = sum_reverse(_lowercase ) iterations += 1 if is_palindrome(_lowercase ): break else: lychrel_nums.append(_lowercase ) return len(_lowercase ) if __name__ == "__main__": print(F'''{solution() = }''')
4
1
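A de-obfuscated sketch of the same reverse-and-add routine (Project Euler 55): the 50-iteration cap is the problem's convention, and the helper names are illustrative.

def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return n + int(str(n)[::-1])


def count_lychrel(limit: int = 10_000) -> int:
    count = 0
    for num in range(1, limit):
        candidate = num
        for _ in range(50):
            candidate = sum_reverse(candidate)
            if is_palindrome(candidate):
                break
        else:
            # No palindrome within 50 iterations: presumed Lychrel.
            count += 1
    return count


if __name__ == "__main__":
    print(count_lychrel())  # 249 for the default limit of 10,000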
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) _A : Optional[int] =[ '''cross_validation.py''', '''gradient_accumulation.py''', '''local_sgd.py''', '''multi_process_metrics.py''', '''memory.py''', '''automatic_gradient_accumulation.py''', '''fsdp_with_peak_mem_tracking.py''', '''deepspeed_with_config_support.py''', '''megatron_lm_gpt_pretraining.py''', ] class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : bool , UpperCamelCase_ : str = None , UpperCamelCase_ : list = None ) -> Optional[int]: '''simple docstring''' _lowercase : int = None _lowercase : int = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) _lowercase : Optional[Any] = os.path.abspath('examples' ) for item in os.listdir(UpperCamelCase_ ): if item not in EXCLUDE_EXAMPLES: _lowercase : List[str] = os.path.join(UpperCamelCase_ , UpperCamelCase_ ) if os.path.isfile(UpperCamelCase_ ) and ".py" in item_path: with self.subTest( tested_script=UpperCamelCase_ , feature_script=UpperCamelCase_ , tested_section='main()' if parser_only else 'training_function()' , ): _lowercase : Tuple = compare_against_test( os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : Optional[int] = '\n'.join(UpperCamelCase_ ) if special_strings is not None: for string in special_strings: _lowercase : Union[str, Any] = diff.replace(UpperCamelCase_ , '' ) self.assertEqual(UpperCamelCase_ , '' ) def __UpperCAmelCase ( self : List[str] ) -> Any: '''simple docstring''' self.one_complete_example('complete_nlp_example.py' , UpperCamelCase_ ) self.one_complete_example('complete_nlp_example.py' , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> int: '''simple docstring''' _lowercase : Optional[Any] = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) _lowercase : str = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) self.one_complete_example('complete_cv_example.py' , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = False @classmethod def __UpperCAmelCase ( cls : Optional[int] ) -> Tuple: '''simple docstring''' super().setUpClass() _lowercase : Dict = tempfile.mkdtemp() _lowercase : str = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) _lowercase : Union[str, Any] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def __UpperCAmelCase ( cls : List[Any] ) -> Any: '''simple docstring''' 
super().tearDownClass() shutil.rmtree(cls._tmpdir ) def __UpperCAmelCase ( self : str ) -> List[Any]: '''simple docstring''' _lowercase : Optional[int] = F''' examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def __UpperCAmelCase ( self : str ) -> str: '''simple docstring''' _lowercase : int = F''' examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} '''.split() _lowercase : str = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def __UpperCAmelCase ( self : Any ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = F''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )} '''.split() _lowercase : Any = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase_ ) self.assertNotIn('epoch 0:' , UpperCamelCase_ ) self.assertIn('epoch 1:' , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> Tuple: '''simple docstring''' _lowercase : List[str] = F''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )} '''.split() _lowercase : int = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase_ ) if torch.cuda.is_available(): _lowercase : Tuple = torch.cuda.device_count() else: _lowercase : Any = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , UpperCamelCase_ ) self.assertIn('epoch 1:' , UpperCamelCase_ ) else: self.assertIn('epoch 0:' , UpperCamelCase_ ) self.assertIn('epoch 1:' , UpperCamelCase_ ) @slow def __UpperCAmelCase ( self : List[str] ) -> int: '''simple docstring''' _lowercase : int = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): _lowercase : Any = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase_ ) _lowercase : Optional[Any] = re.findall('({.+})' , UpperCamelCase_ ) _lowercase : Optional[int] = [r for r in results if 'accuracy' in r][-1] _lowercase : Dict = ast.literal_eval(UpperCamelCase_ ) self.assertGreaterEqual(results['accuracy'] , 0.75 ) def __UpperCAmelCase ( self : List[Any] ) -> str: '''simple docstring''' _lowercase : List[Any] = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def __UpperCAmelCase ( self : Dict ) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: _lowercase : Optional[int] = F''' examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , 'tracking' ) ) ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _lowercase : Tuple = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def __UpperCAmelCase ( self : Any ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
4
'''simple docstring''' import argparse from collections import defaultdict def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int: _lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}''' done_test[_id] += 1 with open(_lowercase, 'r' ) as f: _lowercase : Optional[int] = f.readlines() _lowercase : Dict = f'''class {class_name}(''' _lowercase : List[Any] = f'''{4 * " "}def {test_name}(''' _lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}''' _lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}''' _lowercase : Dict = False _lowercase : str = False _lowercase : List[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = 0 _lowercase : Tuple = 0 _lowercase : Optional[int] = [] for line in lines: if line.startswith(_lowercase ): _lowercase : int = True elif in_class and line.startswith(_lowercase ): _lowercase : List[Any] = True elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )): _lowercase : str = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : List[Any] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(f'''{spaces * " "}{correct_line}''' ) _lowercase : Any = False else: new_lines.append(_lowercase ) with open(_lowercase, 'w' ) as f: for line in new_lines: f.write(_lowercase ) def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]: if fail is not None: with open(_lowercase, 'r' ) as f: _lowercase : Any = {l.strip() for l in f.readlines()} else: _lowercase : str = None with open(_lowercase, 'r' ) as f: _lowercase : str = f.readlines() _lowercase : Union[str, Any] = defaultdict(_lowercase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) if __name__ == "__main__": _A : str =argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) _A : Union[str, Any] =parser.parse_args() main(args.correct_filename, args.fail_filename)
4
1
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. _A : List[Any] =2_0_0 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. _A : Optional[Any] =5_0 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. _A : Any =0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_0_0_0)) def __UpperCamelCase ( _lowercase, _lowercase ) -> tuple[str, float]: _lowercase : List[Any] = len([g for position, g in enumerate(_lowercase ) if g == main_target[position]] ) return (item, float(_lowercase )) def __UpperCamelCase ( _lowercase, _lowercase ) -> tuple[str, str]: _lowercase : List[str] = random.randint(0, len(_lowercase ) - 1 ) _lowercase : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:] _lowercase : str = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __UpperCamelCase ( _lowercase, _lowercase ) -> str: _lowercase : int = list(_lowercase ) if random.uniform(0, 1 ) < MUTATION_PROBABILITY: _lowercase : List[str] = random.choice(_lowercase ) return "".join(_lowercase ) def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, ) -> list[str]: _lowercase : Tuple = [] # Generate more children proportionally to the fitness score. _lowercase : List[Any] = int(parent_a[1] * 100 ) + 1 _lowercase : Any = 10 if child_n >= 10 else child_n for _ in range(_lowercase ): _lowercase : Dict = population_score[random.randint(0, _lowercase )][0] _lowercase , _lowercase : str = crossover(parent_a[0], _lowercase ) # Append new string to the population list. pop.append(mutate(_lowercase, _lowercase ) ) pop.append(mutate(_lowercase, _lowercase ) ) return pop def __UpperCamelCase ( _lowercase, _lowercase, _lowercase = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: _lowercase : Optional[Any] = f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(_lowercase ) # Verify that the target contains no genes besides the ones inside genes variable. _lowercase : Tuple = sorted({c for c in target if c not in genes} ) if not_in_genes_list: _lowercase : List[Any] = f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(_lowercase ) # Generate random starting population. _lowercase : List[Any] = [] for _ in range(_lowercase ): population.append(''.join([random.choice(_lowercase ) for i in range(len(_lowercase ) )] ) ) # Just some logs to know what the algorithms is doing. _lowercase , _lowercase : str = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_lowercase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. 
_lowercase : List[Any] = [evaluate(_lowercase, _lowercase ) for item in population] # Check if there is a matching evolution. _lowercase : Optional[Any] = sorted(_lowercase, key=lambda x : x[1], reverse=_lowercase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generations. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoids regression of evolution. _lowercase : List[str] = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_lowercase ) # Normalize population score to be between 0 and 1. _lowercase : List[str] = [ (item, score / len(_lowercase )) for item, score in population_score ] # This is selection for i in range(_lowercase ): population.extend(select(population_score[int(_lowercase )], _lowercase, _lowercase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # far fewer generations. if len(_lowercase ) > N_POPULATION: break if __name__ == "__main__": _A : List[Any] =( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) _A : Optional[Any] =list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) _A , _A , _A : List[Any] =basic(target_str, genes_list) print( F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
4
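The evolve-a-string loop above reduces to a short, runnable sketch. The population size (200), selection cutoff (50), and mutation probability (0.4) mirror the record's constants; the target string and gene set here are illustrative stand-ins.

import random

TARGET = "HELLO WORLD"
GENES = " ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def fitness(item: str) -> int:
    return sum(g == t for g, t in zip(item, TARGET))


def crossover(a: str, b: str) -> str:
    cut = random.randint(0, len(TARGET) - 1)
    return a[:cut] + b[cut:]


def mutate(item: str, rate: float = 0.4) -> str:
    # With probability `rate`, rewrite one random gene.
    if random.random() < rate:
        pos = random.randrange(len(item))
        item = item[:pos] + random.choice(GENES) + item[pos + 1 :]
    return item


population = ["".join(random.choices(GENES, k=len(TARGET))) for _ in range(200)]
generation = 0
while TARGET not in population:
    generation += 1
    population.sort(key=fitness, reverse=True)
    parents = population[:50]
    children = [mutate(crossover(*random.sample(parents, 2))) for _ in range(199)]
    population = parents[:1] + children  # elitism: keep the current best string
print(f"Converged after {generation} generations")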
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax _A : Optional[int] =logging.get_logger(__name__) @add_end_docstrings(A ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int: '''simple docstring''' super().__init__(**UpperCamelCase_ ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]: '''simple docstring''' return super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = {} if "candidate_labels" in kwargs: _lowercase : Union[str, Any] = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: _lowercase : int = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]: '''simple docstring''' _lowercase : Dict = load_image(UpperCamelCase_ ) _lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) _lowercase : Optional[Any] = candidate_labels _lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels] _lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ ) _lowercase : Any = [text_inputs] return inputs def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = model_inputs.pop('candidate_labels' ) _lowercase : List[str] = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , UpperCamelCase_ ): _lowercase : Optional[int] = text_inputs[0] else: # Batching case. 
_lowercase : List[str] = text_inputs[0][0] _lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Optional[Any] = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' _lowercase : Optional[int] = model_outputs.pop('candidate_labels' ) _lowercase : Optional[int] = model_outputs['logits'][0] if self.framework == "pt": _lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 ) _lowercase : Tuple = probs.tolist() if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : List[Any] = [scores] elif self.framework == "tf": _lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 ) _lowercase : List[Any] = probs.numpy().tolist() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _lowercase : List[Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda x : -x[0] ) ] return result
4
1
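End to end, the pipeline above is normally reached through transformers.pipeline; a usage sketch assuming a public CLIP checkpoint:

from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
result = classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["two cats", "a dog", "an airplane"],
    hypothesis_template="This is a photo of {}.",
)
# `result` is a list of {"score": ..., "label": ...} dicts sorted best-first,
# matching the postprocess step above.
print(result[0]["label"])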
'''simple docstring''' import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL _A : Optional[Any] =version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''') def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase=False, ) -> Tuple: output_path.parent.mkdir(parents=_lowercase, exist_ok=_lowercase ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( _lowercase, _lowercase, f=output_path.as_posix(), input_names=_lowercase, output_names=_lowercase, dynamic_axes=_lowercase, do_constant_folding=_lowercase, use_external_data_format=_lowercase, enable_onnx_checker=_lowercase, opset_version=_lowercase, ) else: export( _lowercase, _lowercase, f=output_path.as_posix(), input_names=_lowercase, output_names=_lowercase, dynamic_axes=_lowercase, do_constant_folding=_lowercase, opset_version=_lowercase, ) @torch.no_grad() def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase = False ) -> List[str]: _lowercase : Optional[Any] = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _lowercase : int = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: _lowercase : List[str] = 'cpu' _lowercase : Optional[Any] = Path(_lowercase ) # VAE DECODER _lowercase : Optional[Any] = AutoencoderKL.from_pretrained(model_path + '/vae' ) _lowercase : List[Any] = vae_decoder.config.latent_channels # forward only through the decoder part _lowercase : Any = vae_decoder.decode onnx_export( _lowercase, model_args=( torch.randn(1, _lowercase, 25, 25 ).to(device=_lowercase, dtype=_lowercase ), False, ), output_path=output_path / 'vae_decoder' / 'model.onnx', ordered_input_names=['latent_sample', 'return_dict'], output_names=['sample'], dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, }, opset=_lowercase, ) del vae_decoder if __name__ == "__main__": _A : List[Any] =argparse.ArgumentParser() parser.add_argument( '''--model_path''', type=str, required=True, help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''', ) parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--opset''', default=1_4, type=int, help='''The version of the ONNX operator set to use.''', ) parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''') _A : int =parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print('''SD: Done: ONNX''')
4
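The torch.onnx.export call pattern in the converter above can be tried without a diffusers checkpoint; a self-contained sketch with a toy module standing in for the VAE decoder (dummy latent shape, input/output names, and dynamic axes follow the record):

from pathlib import Path

import torch


class ToyDecoder(torch.nn.Module):
    def forward(self, latent_sample: torch.Tensor) -> torch.Tensor:
        # Stand-in for AutoencoderKL.decode: upsample the latent 8x.
        return torch.nn.functional.interpolate(latent_sample, scale_factor=8)


output_path = Path("toy_vae_decoder/model.onnx")
output_path.parent.mkdir(parents=True, exist_ok=True)
torch.onnx.export(
    ToyDecoder(),
    (torch.randn(1, 4, 25, 25),),  # dummy latent, as in the record
    f=output_path.as_posix(),
    input_names=["latent_sample"],
    output_names=["sample"],
    dynamic_axes={"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}},
    do_constant_folding=True,
    opset_version=14,
)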
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def __UpperCamelCase ( _lowercase ) -> None: _lowercase , _lowercase : List[Any] = analyze_text(_lowercase ) _lowercase : Any = list(' ' + ascii_lowercase ) # what is our total sum of probabilities. _lowercase : Union[str, Any] = sum(single_char_strings.values() ) # one length string _lowercase : Union[str, Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: _lowercase : Any = single_char_strings[ch] _lowercase : int = my_str / all_sum my_fir_sum += prob * math.loga(_lowercase ) # entropy formula. # print entropy print(f'''{round(-1 * my_fir_sum ):.1f}''' ) # two len string _lowercase : str = sum(two_char_strings.values() ) _lowercase : str = 0 # for each alpha pair (two in size) calculate entropy. for cha in my_alphas: for chb in my_alphas: _lowercase : Optional[Any] = cha + chb if sequence in two_char_strings: _lowercase : int = two_char_strings[sequence] _lowercase : Optional[int] = int(_lowercase ) / all_sum my_sec_sum += prob * math.loga(_lowercase ) # print second entropy print(f'''{round(-1 * my_sec_sum ):.1f}''' ) # print the difference between them print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' ) def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]: _lowercase : Optional[Any] = Counter() # type: ignore _lowercase : List[Any] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0, len(_lowercase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def __UpperCamelCase ( ) -> List[Any]: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
4
1
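Condensed, the calculation above is: first-order entropy H1 from single-character counts, second-order entropy H2 from character-pair counts, and H2 - H1 as the printed difference. A runnable sketch:

import math
from collections import Counter


def shannon_entropies(text: str) -> tuple[float, float]:
    singles = Counter(text)
    pairs = Counter(text[i : i + 2] for i in range(len(text) - 1))

    def entropy(counts: Counter) -> float:
        total = sum(counts.values())
        return -sum((n / total) * math.log2(n / total) for n in counts.values())

    return entropy(singles), entropy(pairs)


h1, h2 = shannon_entropies("had repulsive dashwoods suspicion sincerity but advantage now him")
print(f"H1={h1:.1f}  H2={h2:.1f}  H2-H1={h2 - h1:.1f}")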
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _A : Dict =logging.get_logger(__name__) _A : Optional[int] ='''โ–''' _A : str ={'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''} _A : Any ={ '''sentencepiece_model_file''': '''sentencepiece.bpe.model''', '''vocab_file''': '''vocab.txt''', } _A : Optional[int] ={ '''vocab_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', }, '''sentencepiece_model_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', }, } _A : Dict ={ '''ernie-m-base''': 5_1_4, '''ernie-m-large''': 5_1_4, } _A : Optional[Any] ={ '''ernie-m-base''': {'''do_lower_case''': False}, '''ernie-m-large''': {'''do_lower_case''': False}, } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = ["input_ids"] A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_INIT_CONFIGURATION A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = RESOURCE_FILES_NAMES def __init__( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Any=False , UpperCamelCase_ : List[str]="utf8" , UpperCamelCase_ : Optional[int]="[UNK]" , UpperCamelCase_ : Dict="[SEP]" , UpperCamelCase_ : List[str]="[PAD]" , UpperCamelCase_ : Dict="[CLS]" , UpperCamelCase_ : Optional[int]="[MASK]" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : Optional[int] , ) -> None: '''simple docstring''' _lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , vocab_file=UpperCamelCase_ , encoding=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) _lowercase : Dict = do_lower_case _lowercase : Optional[Any] = sentencepiece_model_ckpt _lowercase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase_ ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: _lowercase : List[Any] = self.load_vocab(filepath=UpperCamelCase_ ) else: _lowercase : str = {self.sp_model.id_to_piece(UpperCamelCase_ ): id for id in range(self.sp_model.get_piece_size() )} _lowercase : List[str] = {v: k for k, v in self.vocab.items()} def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Dict ) -> Union[str, Any]: '''simple docstring''' if text is None: return None _lowercase : Optional[Any] = self.tokenize(UpperCamelCase_ ) _lowercase , _lowercase : int = '', [] for i, ch in enumerate(UpperCamelCase_ ): if ch in self.SP_CHAR_MAPPING: _lowercase : str = self.SP_CHAR_MAPPING.get(UpperCamelCase_ ) else: _lowercase : Optional[int] = unicodedata.normalize('NFKC' , UpperCamelCase_ ) if self.is_whitespace(UpperCamelCase_ ): continue normalized_text += ch char_mapping.extend([i] * len(UpperCamelCase_ ) ) _lowercase , _lowercase , _lowercase : Any = normalized_text, 
[], 0 if self.do_lower_case: _lowercase : Union[str, Any] = text.lower() for token in split_tokens: if token[:1] == "โ–": _lowercase : Tuple = token[1:] _lowercase : Optional[Any] = text[offset:].index(UpperCamelCase_ ) + offset _lowercase : Any = start + len(UpperCamelCase_ ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) _lowercase : Tuple = end return token_mapping @property def __UpperCAmelCase ( self : Dict ) -> Tuple: '''simple docstring''' return len(self.vocab ) def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]: '''simple docstring''' return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : List[str] ) -> List[Any]: '''simple docstring''' _lowercase : Optional[int] = self.__dict__.copy() _lowercase : List[str] = None return state def __setstate__( self : List[Any] , UpperCamelCase_ : Dict ) -> Optional[Any]: '''simple docstring''' _lowercase : Any = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowercase : Tuple = {} _lowercase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Tuple: '''simple docstring''' return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase_ , UpperCamelCase_ ) for c in text) ) def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Dict=False , UpperCamelCase_ : Optional[int]=64 , UpperCamelCase_ : Tuple=0.1 ) -> Union[str, Any]: '''simple docstring''' if self.sp_model_kwargs.get('enable_sampling' ) is True: _lowercase : List[Any] = True if self.sp_model_kwargs.get('alpha' ) is not None: _lowercase : Optional[int] = self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: _lowercase : List[Any] = self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: _lowercase : Any = self.sp_model.EncodeAsPieces(UpperCamelCase_ ) else: _lowercase : Tuple = self.sp_model.SampleEncodeAsPieces(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : Any = [] for pi, piece in enumerate(UpperCamelCase_ ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(UpperCamelCase_ ) and pi != 0: new_pieces.append(UpperCamelCase_ ) continue else: continue _lowercase : str = 0 for i, chunk in enumerate(UpperCamelCase_ ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(UpperCamelCase_ ) or self.is_punct(UpperCamelCase_ ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(UpperCamelCase_ ) _lowercase : Tuple = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) _lowercase : Any = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) _lowercase : Any = i if len(UpperCamelCase_ ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict ) -> Optional[int]: '''simple docstring''' _lowercase : str = ''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ' ' ).strip() return out_string def __UpperCAmelCase ( self : str , UpperCamelCase_ : str ) -> Any: '''simple docstring''' _lowercase : str = self.convert_ids_to_tokens(UpperCamelCase_ ) _lowercase : int = ''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ' ' ).strip() return out_string def __UpperCAmelCase ( self : 
Optional[Any] , UpperCamelCase_ : Any ) -> int: '''simple docstring''' return self.vocab.get(UpperCamelCase_ , self.vocab.get(self.unk_token ) ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : List[str] ) -> str: '''simple docstring''' return self.reverse_vocab.get(UpperCamelCase_ , self.unk_token ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : int=None ) -> str: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowercase : List[Any] = [self.cls_token_id] _lowercase : Tuple = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str=None ) -> int: '''simple docstring''' if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Any=False ) -> Tuple: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1] def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: # [CLS] X [SEP] return (len(UpperCamelCase_ ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(UpperCamelCase_ ) + 1) + [1] * (len(UpperCamelCase_ ) + 3) def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> List[Any]: '''simple docstring''' if "\u4e00" <= char <= "\u9fff": return True return False def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : List[Any] ) -> List[str]: '''simple docstring''' if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : List[str] ) -> List[str]: '''simple docstring''' if char in ",;:.?!~๏ผŒ๏ผ›๏ผšใ€‚๏ผŸ๏ผใ€Šใ€‹ใ€ใ€‘": return True return False def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Optional[Any] ) -> int: '''simple docstring''' if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(UpperCamelCase_ ) == 1: _lowercase : Any = unicodedata.category(UpperCamelCase_ ) if cat == "Zs": return True return False def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Any ) -> Optional[int]: '''simple docstring''' _lowercase : List[str] = {} with io.open(UpperCamelCase_ , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(UpperCamelCase_ ): _lowercase : List[str] = line.rstrip('\n' ) _lowercase : Optional[Any] = int(UpperCamelCase_ ) return token_to_idx def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' _lowercase : Optional[int] = 0 if os.path.isdir(UpperCamelCase_ ): _lowercase : Any = os.path.join( UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: _lowercase : 
Union[str, Any] = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' ' Please check that the vocabulary is not corrupted!' ) _lowercase : Optional[Any] = token_index writer.write(token + '\n' ) index += 1 _lowercase : int = os.path.join(UpperCamelCase_ , 'sentencepiece.bpe.model' ) with open(UpperCamelCase_ , 'wb' ) as fi: _lowercase : int = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (vocab_file,)
4
'''simple docstring''' import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : int ) -> Union[str, Any]: '''simple docstring''' _lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' ) _lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' ) _lowercase : List[Any] = 'The dog is cute and lives in the garden house' _lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] ) _lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim _lowercase : Tuple = jnp.array( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) _lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state'] self.assertEqual(output.shape , UpperCamelCase_ ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
4
1
'''simple docstring''' import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def __UpperCamelCase ( ) -> List[str]: _lowercase : int = argparse.ArgumentParser() parser.add_argument('--model_ckpt', type=_lowercase, default='microsoft/unixcoder-base-nine' ) parser.add_argument('--num_epochs', type=_lowercase, default=5 ) parser.add_argument('--batch_size', type=_lowercase, default=6 ) parser.add_argument('--gradient_accumulation_steps', type=_lowercase, default=1 ) parser.add_argument('--freeze', type=_lowercase, default=_lowercase ) parser.add_argument('--learning_rate', type=_lowercase, default=5E-4 ) parser.add_argument('--seed', type=_lowercase, default=0 ) parser.add_argument('--lr_scheduler_type', type=_lowercase, default='cosine' ) parser.add_argument('--num_warmup_steps', type=_lowercase, default=10 ) parser.add_argument('--weight_decay', type=_lowercase, default=0.0_1 ) parser.add_argument('--output_dir', type=_lowercase, default='./results' ) return parser.parse_args() _A : Optional[Any] =load('''accuracy''') def __UpperCamelCase ( _lowercase ) -> List[str]: _lowercase , _lowercase : List[str] = eval_pred _lowercase : Dict = np.argmax(_lowercase, axis=1 ) return metric.compute(predictions=_lowercase, references=_lowercase ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : str , UpperCamelCase_ : int ) -> None: '''simple docstring''' super().__init__() _lowercase : Dict = trainer def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , **UpperCamelCase_ : Any ) -> Dict: '''simple docstring''' if control.should_evaluate: _lowercase : Union[str, Any] = deepcopy(UpperCamelCase_ ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' ) return control_copy def __UpperCamelCase ( ) -> Optional[Any]: _lowercase : Any = get_args() set_seed(args.seed ) _lowercase : Dict = load_dataset('codeparrot/codecomplex', split='train' ) _lowercase : int = dataset.train_test_split(test_size=0.2 ) _lowercase : Union[str, Any] = train_test['test'].train_test_split(test_size=0.5 ) _lowercase : Dict = DatasetDict( { 'train': train_test['train'], 'test': test_validation['train'], 'valid': test_validation['test'], } ) print('Loading tokenizer and model' ) _lowercase : Optional[int] = AutoTokenizer.from_pretrained(args.model_ckpt ) _lowercase : Tuple = tokenizer.eos_token _lowercase : Tuple = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 ) _lowercase : Union[str, Any] = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _lowercase : Tuple = False _lowercase : Dict = ClassLabel(num_classes=7, names=list(set(train_test_validation['train']['complexity'] ) ) ) def tokenize(_lowercase ): _lowercase : Tuple = tokenizer(example['src'], truncation=_lowercase, max_length=1024 ) _lowercase : List[str] = labels.straint(example['complexity'] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } _lowercase : List[str] = train_test_validation.map( _lowercase, batched=_lowercase, remove_columns=train_test_validation['train'].column_names, ) _lowercase : Any = DataCollatorWithPadding(tokenizer=_lowercase ) _lowercase : Any = 
TrainingArguments( output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy='epoch', save_strategy='epoch', logging_strategy='epoch', per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.0_1, metric_for_best_model='accuracy', run_name='complexity-java', report_to='wandb', ) _lowercase : Union[str, Any] = Trainer( model=_lowercase, args=_lowercase, train_dataset=tokenized_datasets['train'], eval_dataset=tokenized_datasets['valid'], tokenizer=_lowercase, data_collator=_lowercase, compute_metrics=_lowercase, ) print('Training...' ) trainer.add_callback(CustomCallback(_lowercase ) ) trainer.train() if __name__ == "__main__": main()
4
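The custom callback in the script above hinges on TrainerCallback's on_epoch_end hook; a de-obfuscated sketch of that pattern (the deepcopy preserves the control flags that evaluate() mutates):

from copy import deepcopy

from transformers import TrainerCallback


class TrainSetEvalCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            # Re-run evaluation on the *training* split so train/valid
            # accuracy can be compared epoch by epoch under "train_*" keys.
            self._trainer.evaluate(
                eval_dataset=self._trainer.train_dataset, metric_key_prefix="train"
            )
            return control_copy

It is registered after the Trainer is built, as in the record: trainer.add_callback(TrainSetEvalCallback(trainer)).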
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _A : int =logging.get_logger(__name__) _A : Union[str, Any] ={ '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''', } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip_vision_model""" def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any: '''simple docstring''' super().__init__(**UpperCamelCase_ ) _lowercase : Optional[Any] = hidden_size _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = num_hidden_layers _lowercase : str = num_attention_heads _lowercase : Tuple = patch_size _lowercase : Dict = image_size _lowercase : Optional[int] = initializer_range _lowercase : List[Any] = attention_dropout _lowercase : int = layer_norm_eps _lowercase : Optional[int] = hidden_act _lowercase : str = qkv_bias @classmethod def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase_ ) _lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": _lowercase : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip_qformer""" def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Dict = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : List[Any] = num_attention_heads _lowercase : Optional[int] = hidden_act _lowercase : Union[str, Any] = intermediate_size _lowercase : List[Any] = hidden_dropout_prob _lowercase : Dict = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : Optional[int] = initializer_range _lowercase : Tuple = layer_norm_eps _lowercase : List[str] = position_embedding_type _lowercase : str = cross_attention_frequency _lowercase : int = encoder_hidden_size @classmethod def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase_ ) _lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": _lowercase : Optional[int] = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip""" A_ = True def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' super().__init__(**UpperCamelCase_ ) if vision_config is None: _lowercase : Any = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: _lowercase : List[Any] = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: _lowercase : List[Any] = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) _lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ ) _lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ ) _lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt' _lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ ) _lowercase : str = self.text_config.tie_word_embeddings _lowercase : int = self.text_config.is_encoder_decoder _lowercase : Tuple = num_query_tokens _lowercase : str = self.vision_config.hidden_size _lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowercase : List[Any] = 1.0 _lowercase : int = 0.02 @classmethod def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , ) def __UpperCAmelCase ( self : Dict ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = copy.deepcopy(self.__dict__ ) _lowercase : Optional[int] = self.vision_config.to_dict() _lowercase : Optional[Any] = self.qformer_config.to_dict() _lowercase : Tuple = self.text_config.to_dict() _lowercase : Dict = self.__class__.model_type return output
4
1
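Composing the three nested configs above looks like this in practice; a sketch using transformers' public class names (from_vision_qformer_text_configs is the obfuscated classmethod in the record, and the defaults shown match its __init__ arguments):

from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

vision = InstructBlipVisionConfig()  # ViT-g defaults: hidden_size=1408, 39 layers
qformer = InstructBlipQFormerConfig(encoder_hidden_size=vision.hidden_size)
text = OPTConfig()  # "opt" is the fallback text model type above

config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
print(config.num_query_tokens)  # 32 by default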
'''simple docstring''' # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def __UpperCamelCase ( _lowercase ) -> List[str]: _lowercase : str = [False] * len(_lowercase ) _lowercase : Dict = [-1] * len(_lowercase ) def dfs(_lowercase, _lowercase ): _lowercase : Dict = True _lowercase : Optional[int] = c for u in graph[v]: if not visited[u]: dfs(_lowercase, 1 - c ) for i in range(len(_lowercase ) ): if not visited[i]: dfs(_lowercase, 0 ) for i in range(len(_lowercase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _A : Union[str, Any] ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
4
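A runnable rendering of the two-coloring DFS above (color every component, then verify that no edge joins two vertices of the same color):

def is_bipartite(graph: dict[int, list[int]]) -> bool:
    color = {v: -1 for v in graph}  # -1 marks an unvisited vertex

    def dfs(v: int, c: int) -> None:
        color[v] = c
        for u in graph[v]:
            if color[u] == -1:
                dfs(u, 1 - c)

    for v in graph:
        if color[v] == -1:
            dfs(v, 0)
    return all(color[u] != color[v] for v in graph for u in graph[v])


if __name__ == "__main__":
    print(is_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))  # True
    print(is_bipartite({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False: odd cycle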
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): _A : List[str] ='''pt''' elif is_tf_available(): _A : Tuple ='''tf''' else: _A : Optional[int] ='''jax''' class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' A_ = ByTaTokenizer A_ = False def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' super().setUp() _lowercase : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __UpperCAmelCase ( self : int ) -> int: '''simple docstring''' return ByTaTokenizer.from_pretrained('google/byt5-small' ) def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer: '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]: '''simple docstring''' _lowercase : Dict = [] for i in range(len(UpperCamelCase_ ) ): try: _lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) ) _lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) ) if max_length is not None and len(UpperCamelCase_ ) > max_length: _lowercase : List[Any] = toks[:max_length] if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0: while len(UpperCamelCase_ ) < min_length: _lowercase : Tuple = toks + toks # toks_str = [t[1] for t in toks] _lowercase : Dict = [t[0] for t in toks] # Ensure consistency _lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) if " " not in output_txt and len(UpperCamelCase_ ) > 1: _lowercase : Any = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ ) ) if with_prefix_space: _lowercase : Union[str, Any] = ' ' + output_txt _lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) return output_txt, output_ids def __UpperCAmelCase ( self : Optional[int] ) -> int: '''simple docstring''' _lowercase : List[str] = self.ta_base_tokenizer _lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) _lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' _lowercase : Optional[int] = self.ta_base_tokenizer _lowercase : Tuple = 'Unicode โ‚ฌ.' 
_lowercase : List[Any] = tokenizer(UpperCamelCase_ ) _lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , UpperCamelCase_ ) # decoding _lowercase : List[str] = tokenizer.decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' ) _lowercase : Any = tokenizer('e è é ê ë' ) _lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , UpperCamelCase_ ) # decoding _lowercase : Tuple = tokenizer.decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def __UpperCAmelCase ( self : Tuple ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = self.ta_base_tokenizer _lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) if FRAMEWORK != "jax": _lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] ) else: _lowercase : List[str] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def __UpperCAmelCase ( self : Optional[int] ) -> str: '''simple docstring''' _lowercase : Union[str, Any] = self.ta_base_tokenizer _lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , UpperCamelCase_ ) self.assertIn('attention_mask' , UpperCamelCase_ ) self.assertNotIn('decoder_input_ids' , UpperCamelCase_ ) self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ ) def __UpperCAmelCase ( self : Any ) -> int: '''simple docstring''' _lowercase : Tuple = self.ta_base_tokenizer _lowercase : Optional[Any] = [ 'Summary of the text.', 'Another summary.', ] _lowercase : str = tokenizer( text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __UpperCAmelCase ( self : Dict ) -> Tuple: '''simple docstring''' _lowercase : str = self.ta_base_tokenizer _lowercase : str = ['A long paragraph for summarization. </s>'] _lowercase : Optional[int] = ['Summary of the text. 
</s>'] # fmt: off _lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] ) self.assertEqual(UpperCamelCase_ , batch['labels'][0] ) def __UpperCAmelCase ( self : List[str] ) -> int: '''simple docstring''' _lowercase : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _lowercase : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : List[Any] = tempfile.mkdtemp() _lowercase : Any = ' He is very happy, UNwant\u00E9d,running' _lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) _lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ ) _lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) shutil.rmtree(UpperCamelCase_ ) _lowercase : str = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : Dict = tempfile.mkdtemp() _lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) _lowercase : Optional[int] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) _lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ ) _lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] ) -> Tuple: '''simple docstring''' _lowercase : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _lowercase : int = json.load(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 
encoding='utf-8' ) as json_file: _lowercase : Tuple = json.load(UpperCamelCase_ ) _lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )] _lowercase : Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] _lowercase : int = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowercase : Optional[Any] = tokenizer_class.from_pretrained( UpperCamelCase_ , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )] _lowercase : Tuple = tokenizer_class.from_pretrained( UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __UpperCAmelCase ( self : List[str] ) -> str: '''simple docstring''' _lowercase : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase_ ) _lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ ) self.assertTrue(tokenizer.decode([255] ) == '' ) def __UpperCAmelCase ( self : Optional[int] ) -> int: '''simple docstring''' pass def __UpperCAmelCase ( self : str ) -> Tuple: '''simple docstring''' pass def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: '''simple docstring''' pass def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' pass def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]: '''simple docstring''' _lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] _lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def __UpperCAmelCase ( self : List[Any] ) -> str: '''simple docstring''' _lowercase : Dict = self.get_tokenizers() for tokenizer in tokenizers: with 
self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowercase : Optional[int] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] _lowercase : Optional[int] = 0 _lowercase : int = tokenizer.convert_ids_to_tokens( UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) for attr in attributes_list: setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ ) setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ ) setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] ) setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
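The expected id lists in the ByT5 record above follow a simple byte-level scheme; here is a minimal sketch of that mapping. The offset of 3 (for the reserved pad/eos/unk ids) and the eos id of 1 are my reading of the expected values, not quoted from the record.

def byte_encode(text: str, offset: int = 3, eos_id: int = 1) -> list[int]:
    # Each UTF-8 byte b becomes id b + offset; </s> (id 1) is appended at the end.
    return [b + offset for b in text.encode("utf-8")] + [eos_id]

# 'U' = 85 -> 88, ' ' = 32 -> 35, and the three UTF-8 bytes of '€' -> 229, 133, 175,
# which matches the expected input_ids asserted in the test above.
assert byte_encode("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]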
4
1
'''simple docstring''' import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple: '''simple docstring''' _lowercase : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' ) _lowercase : Dict = AutoTokenizer.from_pretrained('google/mt5-small' ) _lowercase : Any = tokenizer('Hello there' , return_tensors='np' ).input_ids _lowercase : Any = tokenizer('Hi I am' , return_tensors='np' ).input_ids _lowercase : Union[str, Any] = shift_tokens_right(UpperCamelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id ) _lowercase : List[Any] = model(UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ ).logits _lowercase : Dict = optax.softmax_cross_entropy(UpperCamelCase_ , onehot(UpperCamelCase_ , logits.shape[-1] ) ).mean() _lowercase : Tuple = -(labels.shape[-1] * loss.item()) _lowercase : Optional[int] = -84.91_27 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
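The score in the test above is recovered from the mean cross-entropy by multiplying back the sequence length and negating. The following sketch, with illustrative names, computes the same summed log-likelihood directly:

import jax
import jax.numpy as jnp

def sequence_log_prob(logits: jnp.ndarray, labels: jnp.ndarray) -> jnp.ndarray:
    # Sum of log p(label_t | logits_t) over the sequence; this equals
    # -(labels.shape[-1] * mean softmax cross-entropy) used in the test.
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    token_log_probs = jnp.take_along_axis(log_probs, labels[..., None], axis=-1)[..., 0]
    return token_log_probs.sum(axis=-1)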
4
'''simple docstring''' _A : Dict =''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' _A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}] _A : Dict ={ '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
4
1
'''simple docstring''' from __future__ import annotations def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase ) -> list: _lowercase : Any = [] _lowercase , _lowercase : Optional[int] = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) _lowercase : Tuple = result + left + right return input_list def __UpperCamelCase ( _lowercase ) -> list: if len(_lowercase ) <= 1: return input_list _lowercase : List[str] = list(_lowercase ) # iteration for two-way merging _lowercase : List[str] = 2 while p <= len(_lowercase ): # getting low, high and middle value for merge-sort of single list for i in range(0, len(_lowercase ), _lowercase ): _lowercase : List[str] = i _lowercase : str = i + p - 1 _lowercase : List[str] = (low + high + 1) // 2 _lowercase : str = merge(_lowercase, _lowercase, _lowercase, _lowercase ) # final merge of last two parts if p * 2 >= len(_lowercase ): _lowercase : Tuple = i _lowercase : Dict = merge(_lowercase, 0, _lowercase, len(_lowercase ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _A : Dict =input('''Enter numbers separated by a comma:\n''').strip() if user_input == "": _A : Optional[int] =[] else: _A : List[Any] =[int(item.strip()) for item in user_input.split(''',''')] print(iter_merge_sort(unsorted))
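The identifier renaming in the record above leaves left/right unbound and drops the write-back of the merged run, so here is a working sketch of the same bottom-up merge sort under my reading of the original:

def merge(xs: list, low: int, mid: int, high: int) -> list:
    # Merge the sorted runs xs[low:mid] and xs[mid:high + 1] back into xs.
    result = []
    left, right = xs[low:mid], xs[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    xs[low : high + 1] = result + left + right
    return xs


def iter_merge_sort(items: list) -> list:
    if len(items) <= 1:
        return list(items)
    xs = list(items)
    p = 2  # width of the runs being merged; doubles each pass
    while p <= len(xs):
        for i in range(0, len(xs), p):
            low, high = i, i + p - 1
            mid = (low + high + 1) // 2
            xs = merge(xs, low, mid, high)
        if p * 2 >= len(xs):
            # Final merge of the two remaining (possibly unequal) runs.
            xs = merge(xs, 0, i, len(xs) - 1)
            break
        p *= 2
    return xs


print(iter_merge_sort([7, 1, 5, 3, 9, 2]))  # [1, 2, 3, 5, 7, 9]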
4
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : int = torch.exp(_lowercase ) _lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i) _lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i) return torch.log(_lowercase ) - B / A class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]: '''simple docstring''' super().__init__() _lowercase : int = config.output_attentions _lowercase : int = config.output_hidden_states _lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] ) _lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] ) _lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )] def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int: '''simple docstring''' if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int): for i in range(len(self.early_exit_entropy ) ): _lowercase : Optional[Any] = x else: _lowercase : Optional[int] = x def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict: '''simple docstring''' _lowercase : Optional[int] = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]: '''simple docstring''' _lowercase : int = () _lowercase : List[Any] = () _lowercase : Tuple = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: _lowercase : Optional[int] = all_hidden_states + (hidden_states,) _lowercase : str = layer_module( UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : List[str] = layer_outputs[0] if self.output_attentions: _lowercase : Tuple = all_attentions + (layer_outputs[1],) _lowercase : Optional[int] = (hidden_states,) if self.output_hidden_states: _lowercase : str = current_outputs + (all_hidden_states,) if self.output_attentions: _lowercase : Optional[int] = current_outputs + (all_attentions,) _lowercase : List[Any] = self.highway[i](UpperCamelCase_ ) # logits, pooled_output if not self.training: _lowercase : Dict = highway_exit[0] _lowercase : Tuple = entropy(UpperCamelCase_ ) _lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy _lowercase : str = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: _lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(UpperCamelCase_ , i + 1 ) else: _lowercase : Optional[int] = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: _lowercase : str = all_hidden_states + (hidden_states,) _lowercase : Optional[Any] = (hidden_states,) if self.output_hidden_states: _lowercase : Dict = outputs + (all_hidden_states,) if 
self.output_attentions: _lowercase : Optional[Any] = outputs + (all_attentions,) _lowercase : Optional[int] = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( """The Bert Model transformer with early exiting (DeeBERT). """ , A , ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' super().__init__(UpperCamelCase_ ) _lowercase : int = config _lowercase : int = BertEmbeddings(UpperCamelCase_ ) _lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ ) _lowercase : Any = BertPooler(UpperCamelCase_ ) self.init_weights() def __UpperCAmelCase ( self : int ) -> Union[str, Any]: '''simple docstring''' self.encoder.init_highway_pooler(self.pooler ) def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' return self.embeddings.word_embeddings def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any: '''simple docstring''' _lowercase : Optional[Any] = value def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]: '''simple docstring''' for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]: '''simple docstring''' if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: _lowercase : Any = input_ids.size() elif inputs_embeds is not None: _lowercase : Any = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) _lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: _lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if encoder_attention_mask is None: _lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: _lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: _lowercase : int = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: _lowercase : int = encoder_attention_mask[:, None, None, :] _lowercase : str = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility _lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0 # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] _lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) _lowercase : Dict = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) _lowercase : List[Any] = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) _lowercase : int = encoder_outputs[0] _lowercase : str = self.pooler(UpperCamelCase_ ) _lowercase : List[Any] = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]: '''simple docstring''' _lowercase : Any = message _lowercase : Dict = exit_layer # start from 1! class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict: '''simple docstring''' super().__init__() _lowercase : Optional[Any] = BertPooler(UpperCamelCase_ ) _lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob ) _lowercase : int = nn.Linear(config.hidden_size , config.num_labels ) def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]: '''simple docstring''' _lowercase : str = encoder_outputs[0] _lowercase : int = self.pooler(UpperCamelCase_ ) # "return" pooler_output # BertModel _lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification _lowercase : Dict = bmodel_output[1] _lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ ) _lowercase : str = self.classifier(UpperCamelCase_ ) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. 
""" , A , ) class lowerCamelCase__ ( A ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]: '''simple docstring''' super().__init__(UpperCamelCase_ ) _lowercase : Dict = config.num_labels _lowercase : Any = config.num_hidden_layers _lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ ) _lowercase : Any = nn.Dropout(config.hidden_dropout_prob ) _lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple: '''simple docstring''' _lowercase : Union[str, Any] = self.num_layers try: _lowercase : Tuple = self.bert( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits _lowercase : List[Any] = outputs[1] _lowercase : int = self.dropout(UpperCamelCase_ ) _lowercase : Optional[int] = self.classifier(UpperCamelCase_ ) _lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowercase : Union[str, Any] = e.message _lowercase : Any = e.exit_layer _lowercase : Optional[int] = outputs[0] if not self.training: _lowercase : Union[str, Any] = entropy(UpperCamelCase_ ) _lowercase : Tuple = [] _lowercase : Tuple = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowercase : Tuple = MSELoss() _lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _lowercase : Union[str, Any] = CrossEntropyLoss() _lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _lowercase : Optional[Any] = [] for highway_exit in outputs[-1]: _lowercase : Optional[Any] = highway_exit[0] if not self.training: highway_logits_all.append(UpperCamelCase_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _lowercase : Union[str, Any] = MSELoss() _lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _lowercase : Dict = CrossEntropyLoss() _lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(UpperCamelCase_ ) if train_highway: _lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _lowercase : Optional[Any] = (loss,) + outputs if not self.training: _lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _lowercase : Dict = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
4
1
'''simple docstring''' from __future__ import annotations def __UpperCamelCase ( _lowercase ) -> float: _lowercase : Any = 0.0_0 _lowercase : Any = 0 for resistor in resistors: if resistor <= 0: _lowercase : Optional[Any] = f'''Resistor at index {index} has a negative or zero value!''' raise ValueError(_lowercase ) first_sum += 1 / float(_lowercase ) index += 1 return 1 / first_sum def __UpperCamelCase ( _lowercase ) -> float: _lowercase : str = 0.0_0 _lowercase : Optional[Any] = 0 for resistor in resistors: sum_r += resistor if resistor < 0: _lowercase : str = f'''Resistor at index {index} has a negative value!''' raise ValueError(_lowercase ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
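A worked example of the two formulas above, wrapped in hypothetical readable helpers: resistors of 3 and 6 ohms give 1/(1/3 + 1/6) = 2 ohms in parallel and 3 + 6 = 9 ohms in series.

def resistor_parallel(resistors: list[float]) -> float:
    return 1 / sum(1 / r for r in resistors)

def resistor_series(resistors: list[float]) -> float:
    return sum(resistors)

print(resistor_parallel([3.0, 6.0]))  # 2.0
print(resistor_series([3.0, 6.0]))    # 9.0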
4
'''simple docstring''' import unittest from knapsack import greedy_knapsack as kp class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : int ) -> Any: '''simple docstring''' _lowercase : List[Any] = [10, 20, 30, 40, 50, 60] _lowercase : Tuple = [2, 4, 6, 8, 10, 12] _lowercase : Optional[Any] = 100 self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 ) def __UpperCAmelCase ( self : int ) -> int: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' ) def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'Weight can not be negative.' ) def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'Profit can not be negative.' ) def __UpperCAmelCase ( self : int ) -> List[str]: '''simple docstring''' self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' ) def __UpperCAmelCase ( self : int ) -> List[Any]: '''simple docstring''' self.assertRaisesRegex( UpperCamelCase_ , 'The length of profit and weight must be same.' ) if __name__ == "__main__": unittest.main()
4
1
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets _A : int ='''\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } ''' _A : Tuple ='''\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy. ''' _A : Dict =r''' Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting "1/2" to "\\frac{1}{2}") Examples: >>> metric = datasets.load_metric("competition_math") >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]) >>> print(results) {\'accuracy\': 1.0} ''' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' ), 'references': datasets.Value('string' ), } ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , ) def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict ) -> Union[str, Any]: '''simple docstring''' _lowercase : List[str] = 0.0 for i, j in zip(UpperCamelCase_ , UpperCamelCase_ ): n_correct += 1.0 if math_equivalence.is_equiv(UpperCamelCase_ , UpperCamelCase_ ) else 0.0 _lowercase : int = n_correct / len(UpperCamelCase_ ) return { "accuracy": accuracy, }
4
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Tuple =['''XLNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =['''XLNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =[ '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLNetForMultipleChoice''', '''XLNetForQuestionAnswering''', '''XLNetForQuestionAnsweringSimple''', '''XLNetForSequenceClassification''', '''XLNetForTokenClassification''', '''XLNetLMHeadModel''', '''XLNetModel''', '''XLNetPreTrainedModel''', '''load_tf_weights_in_xlnet''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =[ '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLNetForMultipleChoice''', '''TFXLNetForQuestionAnsweringSimple''', '''TFXLNetForSequenceClassification''', '''TFXLNetForTokenClassification''', '''TFXLNetLMHeadModel''', '''TFXLNetMainLayer''', '''TFXLNetModel''', '''TFXLNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
1
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple: '''simple docstring''' _lowercase : int = parent _lowercase : str = batch_size _lowercase : List[str] = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_attention_mask _lowercase : List[Any] = use_token_type_ids _lowercase : Union[str, Any] = use_labels _lowercase : Dict = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : int = num_attention_heads _lowercase : Optional[int] = intermediate_size _lowercase : Any = hidden_act _lowercase : List[str] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Optional[int] = max_position_embeddings _lowercase : int = type_vocab_size _lowercase : Any = type_sequence_label_size _lowercase : Any = initializer_range _lowercase : str = num_choices def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : int = None if self.use_attention_mask: _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Any = None if self.use_token_type_ids: _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' _lowercase : Dict = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs _lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax 
class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' A_ = True A_ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Tuple = FlaxRoFormerModelTester(self ) @slow def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: _lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ ) _lowercase : str = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase_ ) @require_flax class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : List[str] ) -> List[Any]: '''simple docstring''' _lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) _lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] ) _lowercase : int = model(UpperCamelCase_ )[0] _lowercase : Union[str, Any] = 5_0000 _lowercase : str = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCamelCase_ ) _lowercase : int = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
4
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Optional[Any] =logging.get_logger(__name__) _A : Optional[int] ={ '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """markuplm""" def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]: '''simple docstring''' super().__init__( pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : List[Any] = vocab_size _lowercase : Union[str, Any] = hidden_size _lowercase : Dict = num_hidden_layers _lowercase : Optional[Any] = num_attention_heads _lowercase : Dict = hidden_act _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : List[Any] = type_vocab_size _lowercase : Union[str, Any] = initializer_range _lowercase : Optional[int] = layer_norm_eps _lowercase : Optional[Any] = position_embedding_type _lowercase : str = use_cache _lowercase : str = classifier_dropout # additional properties _lowercase : int = max_depth _lowercase : Dict = max_xpath_tag_unit_embeddings _lowercase : str = max_xpath_subs_unit_embeddings _lowercase : List[str] = tag_pad_id _lowercase : Optional[int] = subs_pad_id _lowercase : Any = xpath_unit_hidden_size
4
1
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _A : int =logging.get_logger(__name__) _A : Union[str, Any] ={ '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''', } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip_vision_model""" def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any: '''simple docstring''' super().__init__(**UpperCamelCase_ ) _lowercase : Optional[Any] = hidden_size _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = num_hidden_layers _lowercase : str = num_attention_heads _lowercase : Tuple = patch_size _lowercase : Dict = image_size _lowercase : Optional[int] = initializer_range _lowercase : List[Any] = attention_dropout _lowercase : int = layer_norm_eps _lowercase : Optional[int] = hidden_act _lowercase : str = qkv_bias @classmethod def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase_ ) _lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": _lowercase : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip_qformer""" def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Dict = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : List[Any] = num_attention_heads _lowercase : Optional[int] = hidden_act _lowercase : Union[str, Any] = intermediate_size _lowercase : List[Any] = hidden_dropout_prob _lowercase : Dict = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : Optional[int] = initializer_range _lowercase : Tuple = layer_norm_eps _lowercase : List[str] = position_embedding_type _lowercase : str = cross_attention_frequency _lowercase : int = encoder_hidden_size @classmethod def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase_ ) _lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": _lowercase : Optional[int] = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """instructblip""" A_ = True def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]: '''simple docstring''' super().__init__(**UpperCamelCase_ ) if vision_config is None: _lowercase : Any = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: _lowercase : List[Any] = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: _lowercase : List[Any] = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) _lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ ) _lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ ) _lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt' _lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ ) _lowercase : str = self.text_config.tie_word_embeddings _lowercase : int = self.text_config.is_encoder_decoder _lowercase : Tuple = num_query_tokens _lowercase : str = self.vision_config.hidden_size _lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowercase : List[Any] = 1.0 _lowercase : int = 0.02 @classmethod def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , ) def __UpperCAmelCase ( self : Dict ) -> List[str]: '''simple docstring''' _lowercase : List[Any] = copy.deepcopy(self.__dict__ ) _lowercase : Optional[int] = self.vision_config.to_dict() _lowercase : Optional[Any] = self.qformer_config.to_dict() _lowercase : Tuple = self.text_config.to_dict() _lowercase : Dict = self.__class__.model_type return output
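A hedged usage sketch of the composite config defined above; the class and sub-config names match the public transformers API, and the printed default follows the record's signature, but the composition here is illustrative rather than taken from the source.

from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

# Compose the three sub-configs explicitly instead of relying on the defaults.
config = InstructBlipConfig.from_vision_qformer_text_configs(
    vision_config=InstructBlipVisionConfig(),
    qformer_config=InstructBlipQFormerConfig(),
    text_config=OPTConfig(),
)
print(config.num_query_tokens)  # 32 by default, per the record above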
4
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : Tuple = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: _lowercase : Tuple = 4 _lowercase : Union[str, Any] = 48 _lowercase : Any = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : Dict = [6, 6, 6, 6] _lowercase : Optional[int] = 60 _lowercase : List[str] = [6, 6, 6, 6] _lowercase : Dict = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : str = 4 _lowercase : str = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: _lowercase : str = 1 _lowercase : Tuple = 1 _lowercase : Dict = 126 _lowercase : Optional[int] = 7 _lowercase : List[Any] = 2_5_5.0 _lowercase : Tuple = '' return config def __UpperCamelCase ( _lowercase, _lowercase ) -> str: if "patch_embed.proj" in name and "layers" not in name: _lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: _lowercase : Tuple = name.replace('layers', 'encoder.stages' ) if "residual_group.blocks" in name: _lowercase : str = name.replace('residual_group.blocks', 'layers' ) if "attn.proj" in name: _lowercase : str = name.replace('attn.proj', 'attention.output.dense' ) if "attn" in name: _lowercase : List[Any] = name.replace('attn', 'attention.self' ) if "norm1" in name: _lowercase : List[str] = name.replace('norm1', 'layernorm_before' ) if "norm2" in name: _lowercase : Tuple = name.replace('norm2', 'layernorm_after' ) if "mlp.fc1" in name: _lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' ) if "mlp.fc2" in name: _lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' ) if "q_bias" in name: _lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' ) if "k_bias" in name: _lowercase : str = name.replace('k_bias', 'key.bias' ) if "v_bias" in name: _lowercase : int = name.replace('v_bias', 'value.bias' ) if "cpb_mlp" in name: _lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' ) if name == "norm.weight": _lowercase : Union[str, Any] = 'layernorm.weight' if name == "norm.bias": _lowercase : List[Any] = 'layernorm.bias' if "conv_first" in name: _lowercase : Tuple = name.replace('conv_first', 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: _lowercase : List[str] = name.replace('conv_last', 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: _lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' ) if "upsample.0" in name: _lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' ) if "upsample.2" in name: _lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' ) 
_lowercase : Optional[int] = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": _lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' ) _lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' ) else: pass else: _lowercase : Tuple = 'swin2sr.' + name return name def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]: for key in orig_state_dict.copy().keys(): _lowercase : int = orig_state_dict.pop(_lowercase ) if "qkv" in key: _lowercase : Tuple = key.split('.' ) _lowercase : Optional[Any] = int(key_split[1] ) _lowercase : Any = int(key_split[4] ) _lowercase : Optional[Any] = config.embed_dim if "weight" in key: _lowercase : Optional[int] = val[:dim, :] _lowercase : int = val[dim : dim * 2, :] _lowercase : int = val[-dim:, :] else: _lowercase : Optional[Any] = val[:dim] _lowercase : Tuple = val[dim : dim * 2] _lowercase : List[str] = val[-dim:] pass else: _lowercase : List[Any] = val return orig_state_dict def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]: _lowercase : Optional[Any] = get_config(_lowercase ) _lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase ) model.eval() _lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' ) _lowercase : Any = convert_state_dict(_lowercase, _lowercase ) _lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase ) if len(_lowercase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_lowercase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'''Unexpected key {key} in state_dict''' ) # verify values _lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' _lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' ) _lowercase : Tuple = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values _lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256 _lowercase : List[str] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) _lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 ) if config.num_channels == 1: _lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) _lowercase : Optional[int] = model(_lowercase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 512, 512] ) _lowercase : Tuple = torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] ) _lowercase : int = torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here _lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] ) _lowercase : Dict = torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : List[str] = torch.Size([1, 3, 512, 512] ) _lowercase : int = torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], 
[-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 1024, 1024] ) _lowercase : Union[str, Any] = torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 ) print('Looks ok!' ) _lowercase : List[str] = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } _lowercase : int = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowercase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_lowercase ) if push_to_hub: model.push_to_hub(f'''caidas/{model_name}''' ) processor.push_to_hub(f'''caidas/{model_name}''' ) if __name__ == "__main__": _A : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') _A : int =parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
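A minimal sketch of the fused-QKV split performed in the state-dict conversion above: a (3*dim, dim) weight stacks query, key and value along the first axis, and the bias splits the same way.

import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)
query_w = qkv_weight[:dim, :]
key_w = qkv_weight[dim : dim * 2, :]
value_w = qkv_weight[-dim:, :]
# The three slices tile the fused weight exactly.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), qkv_weight)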
4
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer _A : Optional[Any] =logging.get_logger(__name__) _A : Optional[int] ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _A : int ={ '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } _A : Any ={ '''distilbert-base-uncased''': 5_1_2, '''distilbert-base-uncased-distilled-squad''': 5_1_2, '''distilbert-base-cased''': 5_1_2, '''distilbert-base-cased-distilled-squad''': 5_1_2, '''distilbert-base-german-cased''': 5_1_2, '''distilbert-base-multilingual-cased''': 5_1_2, } _A : List[str] ={ '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = PRETRAINED_INIT_CONFIGURATION A_ = ["""input_ids""", """attention_mask"""] A_ = DistilBertTokenizer def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : int=True , UpperCamelCase_ : Any="[UNK]" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Optional[Any]="[PAD]" , UpperCamelCase_ : List[Any]="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Any=None , **UpperCamelCase_ : Optional[int] , ) -> Dict: '''simple docstring''' 
super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('strip_accents' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('handle_chinese_chars' , UpperCamelCase_ ) != tokenize_chinese_chars ): _lowercase : int = getattr(UpperCamelCase_ , normalizer_state.pop('type' ) ) _lowercase : str = do_lower_case _lowercase : Dict = strip_accents _lowercase : Optional[Any] = tokenize_chinese_chars _lowercase : Any = normalizer_class(**UpperCamelCase_ ) _lowercase : Optional[Any] = do_lower_case def __UpperCAmelCase ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : List[str]=None ) -> Optional[int]: '''simple docstring''' _lowercase : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _lowercase : List[str] = [self.sep_token_id] _lowercase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' _lowercase : Optional[Any] = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
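The class above mirrors `DistilBertTokenizerFast` from `transformers`, so the shipped tokenizer can be exercised directly; a usage sketch (the checkpoint download is the only external dependency):

from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
encoded = tokenizer("Hello world!", "A second segment.")
# DistilBERT frames pairs as [CLS] A [SEP] B [SEP] and returns no token_type_ids.
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))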
4
'''simple docstring''' def __UpperCamelCase ( _lowercase, _lowercase ) -> list: _lowercase : List[str] = word.split() def justify(_lowercase, _lowercase, _lowercase ) -> str: _lowercase : Dict = max_width - width _lowercase : Tuple = len(_lowercase ) if len(_lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _lowercase : Tuple = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _lowercase : str = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _lowercase : Optional[int] = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_lowercase ): num_spaces_between_words_list[i] += 1 _lowercase : Union[str, Any] = [] for i in range(_lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ' ' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_lowercase ) _lowercase : str = [] _lowercase : list[str] = [] _lowercase : Union[str, Any] = 0 for word in words: if width + len(_lowercase ) + len(_lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_lowercase ) width += len(_lowercase ) else: # justify the line and add it to result answer.append(justify(_lowercase, _lowercase, _lowercase ) ) # reset new line and new width _lowercase , _lowercase : Optional[Any] = [word], len(_lowercase ) _lowercase : Optional[int] = max_width - width - len(_lowercase ) answer.append(' '.join(_lowercase ) + (remaining_spaces + 1) * ' ' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
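Worked example for the full-justify routine above. The public name is masked in this dump; `text_justification` is assumed from the upstream source, so treat the call as illustrative:

# Illustrative only: `text_justification(sentence, max_width)` is the assumed
# unmasked name of the routine defined above.
for line in text_justification("This is an example of text justification.", 16):
    print(repr(line))
# expected, matching the classic test case:
# 'This    is    an'
# 'example  of text'
# 'justification.  '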
4
1
'''simple docstring''' import os def __UpperCamelCase ( ) -> List[Any]: _lowercase : Optional[int] = os.path.dirname(os.path.realpath(_lowercase ) ) _lowercase : Tuple = os.path.join(_lowercase, 'triangle.txt' ) with open(_lowercase ) as f: _lowercase : Dict = f.readlines() _lowercase : Tuple = [] for line in triangle: _lowercase : List[Any] = [] for number in line.strip().split(' ' ): numbers_from_line.append(int(_lowercase ) ) a.append(_lowercase ) for i in range(1, len(_lowercase ) ): for j in range(len(a[i] ) ): _lowercase : str = a[i - 1][j] if j != len(a[i - 1] ) else 0 _lowercase : Optional[int] = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(_lowercase, _lowercase ) return max(a[-1] ) if __name__ == "__main__": print(solution())
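The same bottom-up accumulation, restated on an inline triangle so it runs without the external `triangle.txt` (a small sketch, not the Project Euler input):

triangle = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for i in range(1, len(triangle)):
    for j in range(len(triangle[i])):
        # best predecessor from the row above, guarding both edges
        right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
        left = triangle[i - 1][j - 1] if j > 0 else 0
        triangle[i][j] += max(right, left)
print(max(triangle[-1]))  # 23 = 3 + 7 + 4 + 9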
4
'''simple docstring''' import os from collections.abc import Iterator def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]: for dir_path, dir_names, filenames in os.walk(_lowercase ): _lowercase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._'] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"): yield os.path.join(_lowercase, _lowercase ).lstrip('./' ) def __UpperCamelCase ( _lowercase ) -> List[str]: return f'''{i * " "}*''' if i else "\n##" def __UpperCamelCase ( _lowercase, _lowercase ) -> str: _lowercase : Optional[Any] = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part: print(f'''{md_prefix(_lowercase )} {new_part.replace("_", " " ).title()}''' ) return new_path def __UpperCamelCase ( _lowercase = "." ) -> None: _lowercase : Dict = '' for filepath in sorted(good_file_paths(_lowercase ) ): _lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase ) if filepath != old_path: _lowercase : Dict = print_path(_lowercase, _lowercase ) _lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0 _lowercase : Dict = f'''{filepath}/{filename}'''.replace(' ', '%20' ) _lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0] print(f'''{md_prefix(_lowercase )} [{filename}]({url})''' ) if __name__ == "__main__": print_directory_md('''.''')
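A quick check of the prefix helper used above, restated with a readable name: depth 0 opens a new `##` section, deeper entries become indented bullets.

def md_prefix(i):
    # same expression as the masked helper above
    return f"{i * ' '}*" if i else "\n##"

print(repr(md_prefix(0)))  # '\n##'
print(repr(md_prefix(2)))  # '  *'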
4
1
'''simple docstring''' from __future__ import annotations from math import pi # Define the Reduced Planck Constant โ„ (H bar), speed of light C, value of # Pi and the function _A : Tuple =1.0_54_57_18_17e-34 # unit of โ„ : J * s _A : str =3e8 # unit of c : m * s^-1 def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> dict[str, float]: if (force, area, distance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if force < 0: raise ValueError('Magnitude of force can not be negative' ) if distance < 0: raise ValueError('Distance can not be negative' ) if area < 0: raise ValueError('Area can not be negative' ) if force == 0: _lowercase : Dict = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: _lowercase : Optional[Any] = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: _lowercase : int = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('One and only one argument must be 0' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
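Hedged usage sketch for the Casimir solver above; `casimir_force` and its keyword names are assumed from the upstream source. Pass zero for exactly one of the three quantities and it is solved from the other two:

# Illustrative call; the name and keywords are assumptions, not confirmed by the dump.
result = casimir_force(force=0, area=4.0, distance=0.03)
print(result)  # {'force': ...} — attractive force in newtons for 4 m^2 plates 3 cm apart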
4
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Dict =['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Union[str, Any] =['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =[ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys _A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
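The `_LazyModule` indirection above defers the heavy torch/sentencepiece/tokenizers imports until an attribute is actually accessed. A self-contained sketch of the same deferred-import idea:

import importlib

class LazyModule:
    """Import the wrapped module only on first attribute access."""

    def __init__(self, name: str):
        self._name = name
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

lazy_json = LazyModule("json")
print(lazy_json.dumps({"lazy": True}))  # "json" is imported only at this line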
4
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _A : List[str] =logging.get_logger(__name__) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = ["""pixel_values"""] def __init__( self : Optional[int] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : float = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 255 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , **UpperCamelCase_ : str , ) -> None: '''simple docstring''' super().__init__(**UpperCamelCase_ ) _lowercase : Union[str, Any] = size if size is not None else {'shortest_edge': 384} _lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) _lowercase : Any = do_resize _lowercase : List[str] = size # Default value set here for backwards compatibility where the value in config is None _lowercase : Dict = crop_pct if crop_pct is not None else 224 / 256 _lowercase : Tuple = resample _lowercase : str = do_rescale _lowercase : str = rescale_factor _lowercase : List[str] = do_normalize _lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowercase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : float , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : str , ) -> np.ndarray: '''simple docstring''' _lowercase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}''' ) _lowercase : str = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct _lowercase : List[Any] = int(shortest_edge / crop_pct ) _lowercase : Tuple = get_resize_output_image_size(UpperCamelCase_ , size=UpperCamelCase_ , default_to_square=UpperCamelCase_ ) _lowercase : Union[str, Any] = resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=UpperCamelCase_ , size=(shortest_edge, shortest_edge) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( UpperCamelCase_ , size=(shortest_edge, shortest_edge) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Any , ) -> Dict: '''simple docstring''' return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : Any , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Dict , ) -> np.ndarray: '''simple docstring''' return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : float = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : int , ) -> PIL.Image.Image: '''simple docstring''' _lowercase : Any = do_resize if do_resize is not None else self.do_resize _lowercase : int = crop_pct if crop_pct is not None else self.crop_pct _lowercase : Dict = resample if resample is not None else self.resample _lowercase : Dict = do_rescale if do_rescale is not None else self.do_rescale _lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize _lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean _lowercase : Union[str, Any] = image_std if image_std is not None else self.image_std _lowercase : str = size if size is not None else self.size _lowercase : List[str] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) _lowercase : Any = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' 
) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. _lowercase : int = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: _lowercase : int = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , crop_pct=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_rescale: _lowercase : List[str] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: _lowercase : Any = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] _lowercase : Optional[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] _lowercase : Optional[int] = {'pixel_values': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
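The masked processor matches `ConvNextImageProcessor` from `transformers` (shortest-edge-384 default, `crop_pct` resize below 384, plain warp at 384 and above); assuming that mapping, a usage sketch:

import numpy as np
from transformers import ConvNextImageProcessor

processor = ConvNextImageProcessor(size={"shortest_edge": 384})
image = np.random.randint(0, 256, size=(512, 640, 3), dtype=np.uint8)
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384): warped, no crop at >= 384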
4
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple: '''simple docstring''' _lowercase : int = parent _lowercase : str = batch_size _lowercase : List[str] = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_attention_mask _lowercase : List[Any] = use_token_type_ids _lowercase : Union[str, Any] = use_labels _lowercase : Dict = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Any = num_hidden_layers _lowercase : int = num_attention_heads _lowercase : Optional[int] = intermediate_size _lowercase : Any = hidden_act _lowercase : List[str] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Optional[int] = max_position_embeddings _lowercase : int = type_vocab_size _lowercase : Any = type_sequence_label_size _lowercase : Any = initializer_range _lowercase : str = num_choices def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : int = None if self.use_attention_mask: _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Any = None if self.use_token_type_ids: _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' _lowercase : Dict = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs _lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax 
class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' A_ = True A_ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self : str ) -> int: '''simple docstring''' _lowercase : Tuple = FlaxRoFormerModelTester(self ) @slow def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: _lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ ) _lowercase : str = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase_ ) @require_flax class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self : List[str] ) -> List[Any]: '''simple docstring''' _lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) _lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] ) _lowercase : int = model(UpperCamelCase_ )[0] _lowercase : Union[str, Any] = 5_0000 _lowercase : str = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCamelCase_ ) _lowercase : int = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
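Stripped of the unittest scaffolding, the slow masked-LM test above reduces to a shape check against the hub checkpoint (flax plus network access required):

import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
print(logits.shape)  # (1, 6, 50000), the expected_shape asserted in the test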
4
1
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def __UpperCamelCase ( _lowercase ) -> Tuple: _lowercase : Tuple = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: _lowercase : Tuple = 4 _lowercase : Union[str, Any] = 48 _lowercase : Any = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : Dict = [6, 6, 6, 6] _lowercase : Optional[int] = 60 _lowercase : List[str] = [6, 6, 6, 6] _lowercase : Dict = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : str = 4 _lowercase : str = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: _lowercase : str = 1 _lowercase : Tuple = 1 _lowercase : Dict = 126 _lowercase : Optional[int] = 7 _lowercase : List[Any] = 2_5_5.0 _lowercase : Tuple = '' return config def __UpperCamelCase ( _lowercase, _lowercase ) -> str: if "patch_embed.proj" in name and "layers" not in name: _lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: _lowercase : Tuple = name.replace('layers', 'encoder.stages' ) if "residual_group.blocks" in name: _lowercase : str = name.replace('residual_group.blocks', 'layers' ) if "attn.proj" in name: _lowercase : str = name.replace('attn.proj', 'attention.output.dense' ) if "attn" in name: _lowercase : List[Any] = name.replace('attn', 'attention.self' ) if "norm1" in name: _lowercase : List[str] = name.replace('norm1', 'layernorm_before' ) if "norm2" in name: _lowercase : Tuple = name.replace('norm2', 'layernorm_after' ) if "mlp.fc1" in name: _lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' ) if "mlp.fc2" in name: _lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' ) if "q_bias" in name: _lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' ) if "k_bias" in name: _lowercase : str = name.replace('k_bias', 'key.bias' ) if "v_bias" in name: _lowercase : int = name.replace('v_bias', 'value.bias' ) if "cpb_mlp" in name: _lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: _lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' ) if name == "norm.weight": _lowercase : Union[str, Any] = 'layernorm.weight' if name == "norm.bias": _lowercase : List[Any] = 'layernorm.bias' if "conv_first" in name: _lowercase : Tuple = name.replace('conv_first', 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: _lowercase : List[str] = name.replace('conv_last', 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: _lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' ) if "upsample.0" in name: _lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' ) if "upsample.2" in name: _lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' ) 
_lowercase : Optional[int] = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": _lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' ) _lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' ) else: pass else: _lowercase : Tuple = 'swin2sr.' + name return name def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]: for key in orig_state_dict.copy().keys(): _lowercase : int = orig_state_dict.pop(_lowercase ) if "qkv" in key: _lowercase : Tuple = key.split('.' ) _lowercase : Optional[Any] = int(key_split[1] ) _lowercase : Any = int(key_split[4] ) _lowercase : Optional[Any] = config.embed_dim if "weight" in key: _lowercase : Optional[int] = val[:dim, :] _lowercase : int = val[dim : dim * 2, :] _lowercase : int = val[-dim:, :] else: _lowercase : Optional[Any] = val[:dim] _lowercase : Tuple = val[dim : dim * 2] _lowercase : List[str] = val[-dim:] pass else: _lowercase : List[Any] = val return orig_state_dict def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]: _lowercase : Optional[Any] = get_config(_lowercase ) _lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase ) model.eval() _lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' ) _lowercase : Any = convert_state_dict(_lowercase, _lowercase ) _lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase ) if len(_lowercase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_lowercase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'''Unexpected key {key} in state_dict''' ) # verify values _lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' _lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' ) _lowercase : Tuple = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values _lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256 _lowercase : List[str] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) _lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 ) if config.num_channels == 1: _lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) _lowercase : Optional[int] = model(_lowercase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 512, 512] ) _lowercase : Tuple = torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] ) _lowercase : int = torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here _lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] ) _lowercase : Dict = torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _lowercase : List[str] = torch.Size([1, 3, 512, 512] ) _lowercase : int = torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], 
[-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _lowercase : Any = torch.Size([1, 3, 1024, 1024] ) _lowercase : Union[str, Any] = torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 ) print('Looks ok!' ) _lowercase : List[str] = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } _lowercase : int = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowercase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_lowercase ) if push_to_hub: model.push_to_hub(f'''caidas/{model_name}''' ) processor.push_to_hub(f'''caidas/{model_name}''' ) if __name__ == "__main__": _A : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') _A : int =parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
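For reference, the entry point wired up in the `__main__` block above can also be driven directly from Python; a hedged sketch converting the default classical-SR x2 checkpoint (downloads the weights and the test image):

url = "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth"
# convert, save locally, and skip the hub push
convert_swinasr_checkpoint(url, "swin2sr-classical-sr-x2-64", False)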
4
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _A : Optional[int] =logging.get_logger(__name__) class lowerCamelCase__ ( A ): '''simple docstring''' A_ = ["""input_features""", """is_longer"""] def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict: '''simple docstring''' super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) _lowercase : Tuple = top_db _lowercase : Any = truncation _lowercase : str = padding _lowercase : int = fft_window_size _lowercase : Any = (fft_window_size >> 1) + 1 _lowercase : int = hop_length _lowercase : Any = max_length_s _lowercase : str = max_length_s * sampling_rate _lowercase : Any = sampling_rate _lowercase : List[Any] = frequency_min _lowercase : Tuple = frequency_max _lowercase : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , ) _lowercase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , ) def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]: '''simple docstring''' _lowercase : Tuple = copy.deepcopy(self.__dict__ ) _lowercase : int = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray: '''simple docstring''' _lowercase : List[str] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , ) return log_mel_spectrogram.T def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk _lowercase : int = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk _lowercase : Union[str, Any] = [0] # randomly choose index for each part _lowercase : Tuple = np.random.choice(ranges[0] ) _lowercase : int = np.random.choice(ranges[1] ) _lowercase : Any = np.random.choice(ranges[2] ) _lowercase : int = mel[idx_front : idx_front + chunk_frames, :] _lowercase : int = 
mel[idx_middle : idx_middle + chunk_frames, :] _lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :] _lowercase : List[Any] = torch.tensor(mel[None, None, :] ) _lowercase : Optional[int] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ ) _lowercase : str = mel_shrink[0][0].numpy() _lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array: '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": _lowercase : Tuple = True # random crop to max_length (for compatibility) -> this should be handled by self.pad _lowercase : Any = len(UpperCamelCase_ ) - max_length _lowercase : Dict = np.random.randint(0 , overflow + 1 ) _lowercase : Optional[int] = waveform[idx : idx + max_length] _lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) _lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed _lowercase : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. _lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 ) _lowercase : List[Any] = False else: _lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) _lowercase : int = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: _lowercase : Any = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": _lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) ) _lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": _lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) ) _lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) ) _lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": _lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters ) _lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: _lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature: '''simple docstring''' _lowercase : Dict = truncation if truncation is not None else self.truncation _lowercase : int = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _lowercase : List[str] = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ): _lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa ) elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowercase : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowercase : int = [np.asarray(UpperCamelCase_ )] # convert to mel spectrogram, truncate and pad if needed. 
_lowercase : Optional[Any] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ ) for waveform in raw_speech ] _lowercase : List[Any] = [] _lowercase : Dict = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_ ) is_longer.append(UpperCamelCase_ ) if truncation == "fusion" and sum(UpperCamelCase_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer _lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) ) _lowercase : str = True if isinstance(input_mel[0] , UpperCamelCase_ ): _lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool _lowercase : Tuple = [[longer] for longer in is_longer] _lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer} _lowercase : Optional[int] = BatchFeature(UpperCamelCase_ ) if return_tensors is not None: _lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ ) return input_features
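The masked extractor matches `ClapFeatureExtractor` from `transformers`; assuming that mapping, a usage sketch on one second of silence at the default 48 kHz rate:

import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()
audio = np.zeros(48_000, dtype=np.float32)  # 1 s of silence at 48 kHz
features = extractor(audio, sampling_rate=48_000, return_tensors="np")
# note: with the default "fusion" truncation, one clip per batch is force-flagged
# as longer even when nothing exceeds the 10 s window (see the code above)
print(features["input_features"].shape, features["is_longer"])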
4
1
'''simple docstring''' import argparse import os import re import packaging.version _A : int ='''examples/''' _A : Optional[int] ={ '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''), '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } _A : Tuple ={ '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } _A : Union[str, Any] ='''README.md''' def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Dict: with open(_lowercase, 'r', encoding='utf-8', newline='\n' ) as f: _lowercase : Optional[int] = f.read() _lowercase , _lowercase : List[str] = REPLACE_PATTERNS[pattern] _lowercase : Optional[int] = replace.replace('VERSION', _lowercase ) _lowercase : Optional[Any] = re_pattern.sub(_lowercase, _lowercase ) with open(_lowercase, 'w', encoding='utf-8', newline='\n' ) as f: f.write(_lowercase ) def __UpperCamelCase ( _lowercase ) -> Tuple: for folder, directories, fnames in os.walk(_lowercase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(_lowercase, _lowercase ), _lowercase, pattern='examples' ) def __UpperCamelCase ( _lowercase, _lowercase=False ) -> Dict: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_lowercase, _lowercase, _lowercase ) if not patch: update_version_in_examples(_lowercase ) def __UpperCamelCase ( ) -> str: _lowercase : Union[str, Any] = '๐Ÿค— Transformers currently provides the following architectures' _lowercase : List[str] = '1. Want to contribute a new model?' with open(_lowercase, 'r', encoding='utf-8', newline='\n' ) as f: _lowercase : Dict = f.readlines() # Find the start of the list. _lowercase : List[str] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _lowercase : Union[str, Any] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): _lowercase : Optional[Any] = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc', 'https://huggingface.co/docs/transformers/model_doc', ) index += 1 with open(_lowercase, 'w', encoding='utf-8', newline='\n' ) as f: f.writelines(_lowercase ) def __UpperCamelCase ( ) -> str: with open(REPLACE_FILES['init'], 'r' ) as f: _lowercase : Optional[int] = f.read() _lowercase : List[Any] = REPLACE_PATTERNS['init'][0].search(_lowercase ).groups()[0] return packaging.version.parse(_lowercase ) def __UpperCamelCase ( _lowercase=False ) -> Tuple: _lowercase : Dict = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' 
) if default_version.is_devrelease: _lowercase : int = default_version.base_version elif patch: _lowercase : Optional[int] = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: _lowercase : Optional[int] = f'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. _lowercase : Any = input(f'''Which version are you releasing? [{default_version}]''' ) if len(_lowercase ) == 0: _lowercase : Optional[int] = default_version print(f'''Updating version to {version}.''' ) global_version_update(_lowercase, patch=_lowercase ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def __UpperCamelCase ( ) -> List[Any]: _lowercase : Dict = get_version() _lowercase : Tuple = f'''{current_version.major}.{current_version.minor + 1}.0.dev0''' _lowercase : Optional[Any] = current_version.base_version # Check with the user we got that right. _lowercase : str = input(f'''Which version are we developing now? [{dev_version}]''' ) if len(_lowercase ) == 0: _lowercase : int = dev_version print(f'''Updating version to {version}.''' ) global_version_update(_lowercase ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": _A : Union[str, Any] =argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') _A : Tuple =parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
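The version bump itself is a plain regex substitution; the core of `update_version_in_file` above, restated standalone:

import re

re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
code = 'x = 1\n__version__ = "4.26.0.dev0"\n'
print(re_pattern.sub('__version__ = "4.26.0"', code))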
4
'''simple docstring''' from __future__ import annotations import requests def __UpperCamelCase ( _lowercase ) -> dict: _lowercase : Optional[int] = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty''' return requests.get(_lowercase ).json() def __UpperCamelCase ( _lowercase = 10 ) -> list[dict]: _lowercase : Union[str, Any] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' _lowercase : Optional[Any] = requests.get(_lowercase ).json()[:max_stories] return [get_hackernews_story(_lowercase ) for story_id in story_ids] def __UpperCamelCase ( _lowercase = 10 ) -> str: _lowercase : Tuple = hackernews_top_stories(_lowercase ) return "\n".join('* [{title}]({url})'.format(**_lowercase ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
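Stripped to essentials, the flow above is two GET requests against the public Firebase API plus markdown formatting (live network access):

import requests

ids = requests.get("https://hacker-news.firebaseio.com/v0/topstories.json").json()[:3]
for story_id in ids:
    story = requests.get(
        f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json"
    ).json()
    print("* [{title}]({url})".format(title=story["title"], url=story.get("url", "")))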
4
1
'''simple docstring''' from collections.abc import Callable import numpy as np def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> np.ndarray: _lowercase : Optional[Any] = int(np.ceil((x_end - xa) / step_size ) ) _lowercase : int = np.zeros((n + 1,) ) _lowercase : Optional[int] = ya _lowercase : int = xa for k in range(_lowercase ): _lowercase : List[Any] = y[k] + step_size * ode_func(_lowercase, y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
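The integrator above, restated with readable names plus a worked check: integrating y' = y from y(0) = 1 over [0, 1] with step 0.01 approximates e from below.

import numpy as np

def explicit_euler(ode_func, y0, x0, step_size, x_end):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

print(explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1])  # ~2.7048 vs e ~2.7183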
4
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Dict =logging.get_logger(__name__) _A : Dict ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCamelCase__ ( A ): '''simple docstring''' A_ = """megatron-bert""" def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _lowercase : Dict = vocab_size _lowercase : Any = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Dict = num_attention_heads _lowercase : Dict = hidden_act _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[int] = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : Optional[Any] = initializer_range _lowercase : List[str] = layer_norm_eps _lowercase : List[Any] = position_embedding_type _lowercase : Optional[Any] = use_cache
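Assuming the masked class is `MegatronBertConfig` (its defaults above match the transformers implementation), a usage sketch:

from transformers import MegatronBertConfig

config = MegatronBertConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8)
print(config.vocab_size, config.hidden_size)  # 29056 512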
4
1
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase__ ( A , unittest.TestCase ): '''simple docstring''' A_ = DiTPipeline A_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS A_ = PipelineTesterMixin.required_optional_params - { """latents""", """num_images_per_prompt""", """callback""", """callback_steps""", } A_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS A_ = False def __UpperCAmelCase ( self : List[Any] ) -> Tuple: '''simple docstring''' torch.manual_seed(0 ) _lowercase : Union[str, Any] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCamelCase_ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=UpperCamelCase_ , ) _lowercase : Optional[Any] = AutoencoderKL() _lowercase : Tuple = DDIMScheduler() _lowercase : Tuple = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]=0 ) -> List[str]: '''simple docstring''' if str(UpperCamelCase_ ).startswith('mps' ): _lowercase : Any = torch.manual_seed(UpperCamelCase_ ) else: _lowercase : Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) _lowercase : Any = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def __UpperCAmelCase ( self : List[str] ) -> Dict: '''simple docstring''' _lowercase : Union[str, Any] = 'cpu' _lowercase : Union[str, Any] = self.get_dummy_components() _lowercase : Tuple = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) _lowercase : List[str] = self.get_dummy_inputs(UpperCamelCase_ ) _lowercase : Optional[int] = pipe(**UpperCamelCase_ ).images _lowercase : Dict = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowercase : Tuple = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] ) _lowercase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase_ , 1E-3 ) def __UpperCAmelCase ( self : Dict ) -> str: '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=UpperCamelCase_ , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self : Any ) -> Dict: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self : Optional[int] ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : List[str] ) -> 
List[str]: '''simple docstring''' _lowercase : Union[str, Any] = torch.manual_seed(0 ) _lowercase : Dict = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) _lowercase : List[str] = ['vase', 'umbrella', 'white shark', 'white wolf'] _lowercase : List[str] = pipe.get_label_ids(UpperCamelCase_ ) _lowercase : Optional[int] = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=40 , output_type='np' ).images for word, image in zip(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : List[str] = load_numpy( F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-2 def __UpperCAmelCase ( self : Optional[Any] ) -> int: '''simple docstring''' _lowercase : List[str] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) _lowercase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) _lowercase : Any = ['vase', 'umbrella'] _lowercase : int = pipe.get_label_ids(UpperCamelCase_ ) _lowercase : Optional[int] = torch.manual_seed(0 ) _lowercase : int = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=25 , output_type='np' ).images for word, image in zip(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' F'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-1
4
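For reference, a minimal sketch of how the pipeline exercised above is driven outside the test harness, assuming the public diffusers `DiTPipeline` API (`from_pretrained`, `get_label_ids`, `class_labels`) and the `facebook/DiT-XL-2-256` checkpoint named in the slow test:

# Hypothetical usage sketch of the DiTPipeline tested above; not part of the test suite.
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# Map human-readable ImageNet labels to class ids, then sample class-conditionally.
class_ids = pipe.get_label_ids(["white shark", "umbrella"])
generator = torch.manual_seed(33)
images = pipe(class_labels=class_ids, generator=generator, num_inference_steps=25).images
images[0].save("white_shark.png")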
'''simple docstring'''
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def __UpperCamelCase ( _lowercase ) -> List[Any]:
    _lowercase : Tuple = args.pruning_method
    _lowercase : int = args.threshold

    _lowercase : str = args.model_name_or_path.rstrip('/' )
    _lowercase : Dict = args.target_model_path

    print(f'''Load fine-pruned model from {model_name_or_path}''' )
    _lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
    _lowercase : List[Any] = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            _lowercase : Optional[int] = tensor
            print(f'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            _lowercase : List[str] = tensor
            print(f'''Copied layer {name}''' )
        elif "bias" in name:
            _lowercase : Dict = tensor
            print(f'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                _lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
                _lowercase : Optional[Any] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                _lowercase : Optional[Any] = name[:-6]
                _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
                _lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
                _lowercase : str = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                _lowercase : str = name[:-6]
                _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
                _lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
                _lowercase : Optional[int] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                _lowercase : Optional[int] = name[:-6]
                _lowercase : List[str] = model[f'''{prefix_}mask_scores''']
                _lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
                _lowercase : str = torch.sigmoid(_lowercase )
                _lowercase : int = s * (r - l) + l
                _lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
                _lowercase : Union[str, Any] = tensor * mask
                print(f'''Pruned layer {name}''' )
            else:
                raise ValueError('Unknown pruning method' )

    if target_model_path is None:
        _lowercase : List[Any] = os.path.join(
            os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )

    if not os.path.isdir(_lowercase ):
        shutil.copytree(_lowercase, _lowercase )
        print(f'''\nCreated folder {target_model_path}''' )

    torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )


if __name__ == "__main__":
    _A : Union[str, Any] =argparse.ArgumentParser()
    parser.add_argument(
        '''--pruning_method''',
        choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
        type=str,
        required=True,
        help=(
            '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
            ''' sigmoied_threshold = Soft movement pruning)'''
        ),
    )
    parser.add_argument(
        '''--threshold''',
        type=float,
        required=False,
        help=(
            '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
            '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
            '''Not needed for `l0`'''
        ),
    )
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        required=True,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    parser.add_argument(
        '''--target_model_path''',
        default=None,
        type=str,
        required=False,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    _A : List[Any] =parser.parse_args()

    main(args)
4
1
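The binarizers imported from `emmental.modules` in the pruning script above are project-specific. As a rough plain-PyTorch approximation of what magnitude and top-K masking compute (the keep-ratio semantics here are assumptions, not the library's exact behavior):

# Sketch only: approximates, does not reproduce, the emmental binarizers.
import torch

def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    # Keep the entries whose scores fall in the top `keep_ratio` fraction.
    k = max(1, int(keep_ratio * scores.numel()))
    cutoff = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= cutoff).to(scores.dtype)

def magnitude_mask(tensor: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    # Magnitude pruning keeps the largest weights by absolute value.
    return topk_mask(tensor.abs(), keep_ratio)

weights = torch.randn(4, 4)
pruned = weights * magnitude_mask(weights, keep_ratio=0.5)  # roughly half the entries zeroed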
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_A : List[Any] ={}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Any =['''GPTSw3Tokenizer''']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys

    _A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
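From user code, the lazy module above behaves like an eager import once `sentencepiece` is installed; a minimal sketch (the checkpoint name is an illustrative assumption, not something this file pins down):

# Usage sketch; the symbol is only exported when `sentencepiece` is available.
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")  # hypothetical checkpoint
input_ids = tokenizer("Träd är fina", return_tensors="pt")["input_ids"]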
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''


def __UpperCamelCase ( _lowercase ) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(_lowercase, _lowercase ):
        _lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(_lowercase )

    _lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )

    _lowercase : Dict = len(_lowercase ) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        _lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(_lowercase ) % 6)
    else:
        _lowercase : Optional[int] = B''

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
            for index in range(0, len(_lowercase ), 6 )
        ).encode()
        + padding
    )


def __UpperCamelCase ( _lowercase ) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
        _lowercase : int = (
            'argument should be a bytes-like object or ASCII string, '
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(_lowercase )

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(_lowercase, _lowercase ):
        try:
            _lowercase : Optional[int] = encoded_data.decode('utf-8' )
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters' )

    _lowercase : Optional[int] = encoded_data.count('=' )

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        _lowercase : str = encoded_data[:-padding]

        _lowercase : Tuple = ''.join(
            bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data
        )[: -padding * 2]
    else:
        _lowercase : Union[str, Any] = ''.join(
            bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data
        )

    _lowercase : List[str] = [
        int(binary_stream[index : index + 8], 2 )
        for index in range(0, len(_lowercase ), 8 )
    ]

    return bytes(_lowercase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
4
1
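Note that the obfuscation gives the encode and decode functions above the same name, so the second definition shadows the first; their intended behavior matches the standard library, which makes a convenient reference for a round-trip check:

# Reference round-trip with the standard library the functions above reimplement.
import base64

data = b"Hello, World!"
encoded = base64.b64encode(data)          # b'SGVsbG8sIFdvcmxkIQ=='
assert base64.b64decode(encoded) == data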
'''simple docstring'''
from ...processing_utils import ProcessorMixin


class lowerCamelCase__ ( A ):
    '''simple docstring'''

    A_ = """SpeechT5FeatureExtractor"""
    A_ = """SpeechT5Tokenizer"""

    def __init__( self : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ) -> List[Any]:
        '''simple docstring'''
        super().__init__(UpperCamelCase_ , UpperCamelCase_ )

    def __call__( self : Union[str, Any] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Any ) -> str:
        '''simple docstring'''
        _lowercase : str = kwargs.pop('audio' , UpperCamelCase_ )
        _lowercase : Any = kwargs.pop('text' , UpperCamelCase_ )
        _lowercase : List[str] = kwargs.pop('text_target' , UpperCamelCase_ )
        _lowercase : int = kwargs.pop('audio_target' , UpperCamelCase_ )
        _lowercase : Any = kwargs.pop('sampling_rate' , UpperCamelCase_ )

        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )

        if audio is not None:
            _lowercase : Union[str, Any] = self.feature_extractor(UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ )
        elif text is not None:
            _lowercase : int = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ )
        else:
            _lowercase : Optional[int] = None

        if audio_target is not None:
            _lowercase : Tuple = self.feature_extractor(audio_target=UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ )
            _lowercase : str = targets['input_values']
        elif text_target is not None:
            _lowercase : List[str] = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ )
            _lowercase : Tuple = targets['input_ids']
        else:
            _lowercase : Optional[Any] = None

        if inputs is None:
            return targets

        if targets is not None:
            _lowercase : Optional[Any] = labels
            _lowercase : List[Any] = targets.get('attention_mask' )
            if decoder_attention_mask is not None:
                _lowercase : Union[str, Any] = decoder_attention_mask

        return inputs

    def __UpperCAmelCase ( self : List[str] , *UpperCamelCase_ : str , **UpperCamelCase_ : Any ) -> List[Any]:
        '''simple docstring'''
        _lowercase : Optional[Any] = kwargs.pop('input_values' , UpperCamelCase_ )
        _lowercase : int = kwargs.pop('input_ids' , UpperCamelCase_ )
        _lowercase : Dict = kwargs.pop('labels' , UpperCamelCase_ )

        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )

        if input_values is not None:
            _lowercase : int = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
        elif input_ids is not None:
            _lowercase : int = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_ )
        else:
            _lowercase : Union[str, Any] = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(UpperCamelCase_ , UpperCamelCase_ ) and "input_ids" in labels[0]):
                _lowercase : Any = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_ )
                _lowercase : Dict = targets['input_ids']
            else:
                _lowercase : Dict = self.feature_extractor.feature_size
                _lowercase : Union[str, Any] = self.feature_extractor.num_mel_bins
                _lowercase : int = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
                _lowercase : Any = feature_size_hack
                _lowercase : List[str] = targets['input_values']
        else:
            _lowercase : Dict = None

        if inputs is None:
            return targets

        if targets is not None:
            _lowercase : Optional[Any] = labels
            _lowercase : Union[str, Any] = targets.get('attention_mask' )
            if decoder_attention_mask is not None:
                _lowercase : Tuple = decoder_attention_mask

        return inputs

    def __UpperCAmelCase ( self : List[str] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )

    def __UpperCAmelCase ( self : int , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : int ) -> str:
        '''simple docstring'''
        return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
4
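A minimal sketch of how the processor above is typically obtained and called; the checkpoint name is an assumption based on the SpeechT5 TTS release, not something this file pins down:

# Usage sketch for the SpeechT5 processor; checkpoint name is assumed.
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")
# inputs["input_ids"] feeds the text encoder of a SpeechT5 TTS model.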
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
    return str(_lowercase ) == str(_lowercase )[::-1]


def __UpperCamelCase ( _lowercase ) -> int:
    return int(_lowercase ) + int(str(_lowercase )[::-1] )


def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
    _lowercase : List[str] = []

    for num in range(1, _lowercase ):
        _lowercase : Tuple = 0
        _lowercase : Tuple = num
        while iterations < 50:
            _lowercase : Union[str, Any] = sum_reverse(_lowercase )
            iterations += 1
            if is_palindrome(_lowercase ):
                break
        else:
            lychrel_nums.append(_lowercase )
    return len(_lowercase )


if __name__ == "__main__":
    print(F'''{solution() = }''')
4
1
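A worked example of the reverse-and-add iteration the solution above performs (Project Euler 55): most seeds reach a palindrome within a few steps, and only numbers that survive 50 iterations are counted as Lychrel candidates.

# Worked example of the reverse-and-add loop; 349 resolves in three steps.
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]

def sum_reverse(n: int) -> int:
    return n + int(str(n)[::-1])

n = 349
for step in range(1, 51):
    n = sum_reverse(n)        # 349 -> 1292 -> 4213 -> 7337
    if is_palindrome(n):
        break
print(step, n)                # 3 7337, so 349 is not a Lychrel number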
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

_A : Union[str, Any] =logging.getLogger()


def __UpperCamelCase ( _lowercase ) -> Optional[int]:
    _lowercase : Dict = {}
    _lowercase : Optional[int] = os.path.join(_lowercase, 'all_results.json' )
    if os.path.exists(_lowercase ):
        with open(_lowercase, 'r' ) as f:
            _lowercase : int = json.load(_lowercase )
    else:
        raise ValueError(f'''can\'t find {path}''' )
    return results


_A : Optional[int] =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class lowerCamelCase__ ( A ):
    '''simple docstring'''

    def __UpperCAmelCase ( self : str ) -> Tuple:
        '''simple docstring'''
        import xla_spawn

        _lowercase : List[Any] = self.get_auto_remove_tmp_dir()
        _lowercase : List[str] = F'''
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
        '''.split()

        with patch.object(UpperCamelCase_ , 'argv' , UpperCamelCase_ ):
            _lowercase : Union[str, Any] = time()
            xla_spawn.main()
            _lowercase : Tuple = time()

            _lowercase : Optional[Any] = get_results(UpperCamelCase_ )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 500 )

    def __UpperCAmelCase ( self : Dict ) -> List[str]:
        '''simple docstring'''
        import xla_spawn

        _lowercase : List[str] = '\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    '.split()
        with patch.object(UpperCamelCase_ , 'argv' , UpperCamelCase_ ):
            xla_spawn.main()
4
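The `get_results` helper above parses the `all_results.json` that the example training scripts write; a sketch of the shape it expects (only `eval_accuracy` is asserted on, the other field is illustrative):

# Sketch of reading the metrics file the TPU test checks.
import json

with open("all_results.json") as f:
    results = json.load(f)    # e.g. {"eval_accuracy": 0.8, "eval_loss": 0.35}
print(results["eval_accuracy"])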
'''simple docstring'''
import argparse
from collections import defaultdict


def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
    _lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
    done_test[_id] += 1

    with open(_lowercase, 'r' ) as f:
        _lowercase : Optional[int] = f.readlines()

    _lowercase : Dict = f'''class {class_name}('''
    _lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
    _lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
    _lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
    _lowercase : Dict = False
    _lowercase : str = False
    _lowercase : List[Any] = False
    _lowercase : Union[str, Any] = False
    _lowercase : Any = 0
    _lowercase : Tuple = 0
    _lowercase : Optional[int] = []

    for line in lines:
        if line.startswith(_lowercase ):
            _lowercase : int = True
        elif in_class and line.startswith(_lowercase ):
            _lowercase : List[Any] = True
        elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
            _lowercase : str = len(line.split(correct_line.split()[0] )[0] )
            count += 1

            if count == done_test[_id]:
                _lowercase : List[Any] = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                _lowercase : Any = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f'''{spaces * " "}{correct_line}''' )
            _lowercase : Any = False
        else:
            new_lines.append(_lowercase )

    with open(_lowercase, 'w' ) as f:
        for line in new_lines:
            f.write(_lowercase )


def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
    if fail is not None:
        with open(_lowercase, 'r' ) as f:
            _lowercase : Any = {l.strip() for l in f.readlines()}
    else:
        _lowercase : str = None

    with open(_lowercase, 'r' ) as f:
        _lowercase : str = f.readlines()

    _lowercase : Union[str, Any] = defaultdict(_lowercase )
    for line in correct_lines:
        _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )


if __name__ == "__main__":
    _A : str =argparse.ArgumentParser()
    parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
    parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    _A : Union[str, Any] =parser.parse_args()

    main(args.correct_filename, args.fail_filename)
4
1
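The script above splits each line of `--correct_filename` on `;` into four fields; a sketch of the expected input format (the path, class, and test names are illustrative):

# Illustrative line format for --correct_filename:
#   <file>;<class name>;<test name>;<corrected source line>
line = "tests/test_x.py;XModelTest;test_slice;expected = torch.tensor([0.1, 0.2])"
file, class_name, test_name, correct_line = line.split(";")
assert "::".join([file, class_name, test_name]) == "tests/test_x.py::XModelTest::test_slice"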