import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy/paste/tweak the fairseq RoBERTa weights into our BERT-style structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
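# Hedged usage note (not part of the original script): a typical invocation looks
# like the following; both paths are placeholders, not real files.
#
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlm-roberta-xl \
#       --pytorch_dump_folder_path ./converted \
#       --classification_head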
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
    b"\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by the protobuf compiler for the old API; `_TRAINERSPEC` is not defined
    # under the builder API, so these lines stay commented out)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
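# Hedged usage sketch (not part of the generated module): once this file is
# importable as `sentencepiece_model_pb2`, the `ModelProto` message built above
# parses a serialized SentencePiece model like any protobuf message. The file
# name "spiece.model" below is a placeholder.
#
#   from sentencepiece_model_pb2 import ModelProto
#   m = ModelProto()
#   with open("spiece.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.vocab_size, len(m.pieces))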
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel


class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")


@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")


class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
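# Hedged illustration (not one of the tests above): `Trie` is the helper the
# tokenizers use to split text on added special tokens in one pass. A minimal
# standalone use with made-up tokens, mirroring the assertions above:
#
#   from transformers.tokenization_utils import Trie
#   trie = Trie()
#   trie.add("[USR]")
#   trie.add("[BOT]")
#   assert trie.split("[USR] hi [BOT] hello") == ["[USR]", " hi ", "[BOT]", " hello"]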
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]

if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
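# Note (added for clarity, not in the original file): `_LazyModule` replaces this
# module in `sys.modules`, so the heavy torch/TF submodules listed above are only
# imported when one of the exported names is first accessed, e.g.:
#
#   from transformers.models.lxmert import LxmertConfig   # no torch import yet
#   from transformers.models.lxmert import LxmertModel    # triggers modeling_lxmert import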
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
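# Hedged usage sketch (not part of this __init__): the pipeline re-exported here
# is typically loaded from a pretrained checkpoint; the model id below is an
# assumption, as is the prompt.
#
#   from diffusers import UniDiffuserPipeline
#   pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
#   image = pipe(prompt="an astronaut riding a horse").images[0]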
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
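# Hedged usage sketch (not one of the tests above): the processor exercised here,
# applied to a single image; the blank PIL image is a placeholder input.
#
#   from PIL import Image
#   from transformers import DonutImageProcessor
#
#   processor = DonutImageProcessor(size={"height": 18, "width": 20})
#   pil_image = Image.new("RGB", (32, 24))
#   pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values  # (1, 3, 18, 20)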
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
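# Hedged sketch (not one of the tests above): the denoising loop these tests
# exercise, written against the public DDPMScheduler API. The zero-returning
# lambda is a placeholder for a real noise-prediction network.
#
#   import torch
#   from diffusers import DDPMScheduler
#
#   scheduler = DDPMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(num_inference_steps=50)
#   sample = torch.randn(1, 3, 8, 8)
#   model = lambda x, t: torch.zeros_like(x)  # placeholder noise predictor
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample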
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a
    tuple of symbols (symbols being variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
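# Illustrative example (added note): for the word tuple ("h", "e", "l", "l", "o</w>"),
# get_pairs returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}.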
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ )
with open(A_ , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(A_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
UpperCamelCase = merges_handle.read().split('\n' )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in merges]
UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase = {}
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = re.sub('([.,!?()])' , r' \1' , A_ )
UpperCamelCase = re.sub('(\')' , r' \1 ' , A_ )
UpperCamelCase = re.sub(r'\s{2,}' , ' ' , A_ )
if "\n" in token:
UpperCamelCase = token.replace('\n' , ' __newln__' )
UpperCamelCase = token.split(' ' )
UpperCamelCase = []
for token in tokens:
if not len(A_ ):
continue
UpperCamelCase = token.lower()
UpperCamelCase = tuple(A_ )
UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
UpperCamelCase = get_pairs(A_ )
if not pairs:
words.append(A_ )
continue
while True:
UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(A_ ):
try:
UpperCamelCase = word.index(A_ , A_ )
new_word.extend(word[i:j] )
UpperCamelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(A_ )
UpperCamelCase = new_word
if len(A_ ) == 1:
break
else:
UpperCamelCase = get_pairs(A_ )
UpperCamelCase = '@@ '.join(A_ )
UpperCamelCase = word[:-4]
UpperCamelCase = word
words.append(A_ )
return " ".join(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = re.findall(r'\S+\n?' , A_ )
for token in words:
split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) )
return split_tokens
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
UpperCamelCase = token.lower()
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = ' '.join(A_ ).replace('@@ ' , '' ).strip()
return out_string
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
UpperCamelCase = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
UpperCamelCase = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
| 3 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : List[str] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase : Optional[int] = {
"camembert-base": 512,
}
_UpperCAmelCase : Union[str, Any] = "▁"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ["input_ids", "attention_mask"]
__lowercase : Tuple = CamembertTokenizer
def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
| 712 |
def A ( lowercase ) -> str:
'''simple docstring'''
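    # Recurse on the quotient of division by two and append the remainder; the recursion bottoms out at 0 or 1.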
UpperCamelCase = int(lowercase )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowercase )
UpperCamelCase , UpperCamelCase = divmod(lowercase , 2 )
return binary_recursive(lowercase ) + str(lowercase )
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = str(lowercase ).strip()
if not number:
raise ValueError('No input value was provided' )
UpperCamelCase = '-' if number.startswith('-' ) else ''
UpperCamelCase = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return f'''{negative}0b{binary_recursive(int(lowercase ) )}'''
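# For example, the wrapper above maps "-37" to "-0b100101" and "0" to "0b0".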
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 | 0 |
import os
def A ( lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = len(grid[0] )
UpperCamelCase = len(lowercase )
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
    # Check vertical, horizontal and diagonal products at the same time (only
    # works for an nxn grid)
for i in range(lowercase ):
for j in range(n_rows - 3 ):
UpperCamelCase = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
UpperCamelCase = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
UpperCamelCase = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
            # Right-to-left diagonal (/) product
if i > 2:
UpperCamelCase = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
UpperCamelCase = max(
lowercase , lowercase , lowercase , lowercase )
if max_product > largest:
UpperCamelCase = max_product
return largest
def A ( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = []
with open(os.path.dirname(lowercase ) + '/grid.txt' ) as file:
for line in file:
grid.append(line.strip('\n' ).split(' ' ) )
UpperCamelCase = [[int(lowercase ) for i in grid[j]] for j in range(len(lowercase ) )]
return largest_product(lowercase )
if __name__ == "__main__":
print(solution())
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_UpperCAmelCase : Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
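    # Walk the dotted key (e.g. "encoder.layers.0.ffn1") down the HF model, check shapes, then copy the value into place.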
for attribute in key.split('.' ):
UpperCamelCase = getattr(lowercase , lowercase )
if weight_type is not None:
UpperCamelCase = getattr(lowercase , lowercase ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
elif weight_type == "running_mean":
UpperCamelCase = value
elif weight_type == "running_var":
UpperCamelCase = value
elif weight_type == "num_batches_tracked":
UpperCamelCase = value
elif weight_type == "inv_freq":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A ( lowercase , lowercase , lowercase ) -> Any:
'''simple docstring'''
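    # Route every fairseq state-dict tensor to its HF counterpart via MAPPING; feature-extractor conv layers are handled separately.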
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(lowercase )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , lowercase )
if "pos_bias_u" in name:
UpperCamelCase = None
elif "pos_bias_v" in name:
UpperCamelCase = None
elif "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = 'weight'
elif "running_mean" in name:
UpperCamelCase = 'running_mean'
elif "inv_freq" in name:
UpperCamelCase = 'inv_freq'
elif "running_var" in name:
UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase = 'num_batches_tracked'
else:
UpperCamelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
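    # Names look like "conv_layers.<layer_id>.<type_id>...": type 0 is the conv itself, type 2 is its (group/layer) norm.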
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int:
'''simple docstring'''
if config_path is not None:
UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' )
else:
UpperCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCamelCase = 'rotary'
if is_finetuned:
if dict_path:
UpperCamelCase = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase = target_dict.pad_index
UpperCamelCase = target_dict.bos_index
UpperCamelCase = target_dict.eos_index
UpperCamelCase = len(target_dict.symbols )
UpperCamelCase = os.path.join(lowercase , 'vocab.json' )
if not os.path.isdir(lowercase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase = 0
UpperCamelCase = 1
with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowercase , lowercase )
UpperCamelCase = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , )
UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
UpperCamelCase = WavaVecaConformerForCTC(lowercase )
else:
UpperCamelCase = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase = fairseq.tasks.setup_task(lowercase )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
UpperCamelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase : Dict = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 | 0 |
def A ( lowercase = 4_000_000 ) -> int:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase , UpperCamelCase = 0, 1
    while b <= lowercase:
if b % 2 == 0:
even_fibs.append(lowercase )
UpperCamelCase , UpperCamelCase = b, a + b
return sum(lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 714 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def A ( lowercase , lowercase ) -> List[str]:
'''simple docstring'''
return float((preds == labels).mean() )
def A ( lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = simple_accuracy(lowercase , lowercase )
UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] )
UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def __UpperCamelCase ( self , A_ , A_ ) -> Any:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(A_ , A_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(A_ , A_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(A_ , A_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(A_ , A_ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 3 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Union[str, Any] = ["image_processor", "tokenizer"]
__lowercase : Optional[int] = "Pix2StructImageProcessor"
__lowercase : Dict = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , A_ , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = False
super().__init__(A_ , A_ )
def __call__( self , A_=None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 2_048 , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCamelCase = self.tokenizer
UpperCamelCase = self.tokenizer(
text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCamelCase = self.image_processor(
A_ , return_tensors=A_ , max_patches=A_ , **A_ )
else:
# add pixel_values and bbox
UpperCamelCase = self.image_processor(
A_ , return_tensors=A_ , max_patches=A_ , header_text=A_ , **A_ )
if text is not None and not self.image_processor.is_vqa:
UpperCamelCase = self.tokenizer(
text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
if "attention_mask" in text_encoding:
UpperCamelCase = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
UpperCamelCase = text_encoding.pop('input_ids' )
else:
UpperCamelCase = None
if text_encoding is not None:
encoding_image_processor.update(A_ )
return encoding_image_processor
def __UpperCamelCase ( self , *A_ , **A_ ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*A_ , **A_ )
def __UpperCamelCase ( self , *A_ , **A_ ) -> Dict:
"""simple docstring"""
return self.tokenizer.decode(*A_ , **A_ )
@property
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 715 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_UpperCAmelCase : str = "scheduler_config.json"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Tuple = 1
__lowercase : int = 2
__lowercase : List[Any] = 3
__lowercase : str = 4
__lowercase : Optional[Any] = 5
@dataclass
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : jnp.ndarray
class lowercase :
__lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME
__lowercase : Dict = ["dtype"]
__lowercase : List[Any] = []
__lowercase : Dict = True
@classmethod
def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , )
UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ):
UpperCamelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str:
"""simple docstring"""
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def __UpperCamelCase ( cls ) -> int:
"""simple docstring"""
UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase = importlib.import_module(__name__.split('.' )[0] )
UpperCamelCase = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
def A ( lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
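    # Right-pad x with singleton dimensions and broadcast, so per-timestep scalars line up with sample tensors.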
assert len(lowercase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase )
def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray:
'''simple docstring'''
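    # Discretize the cosine alpha-bar schedule: beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at the 0.999 cap.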
def alpha_bar(lowercase ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
UpperCamelCase = []
for i in range(lowercase ):
UpperCamelCase = i / num_diffusion_timesteps
UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) )
return jnp.array(lowercase , dtype=lowercase )
@flax.struct.dataclass
class lowercase :
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
@classmethod
def __UpperCamelCase ( cls , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = scheduler.config
if config.trained_betas is not None:
UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
UpperCamelCase = 1.0 - betas
UpperCamelCase = jnp.cumprod(A_ , axis=0 )
return cls(
alphas=A_ , betas=A_ , alphas_cumprod=A_ , )
def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
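    # Gather sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) for each timestep and broadcast them to the sample shape.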
UpperCamelCase = state.alphas_cumprod
UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase = sqrt_alpha_prod.flatten()
UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape )
UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase )
UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def A ( lowercase , lowercase , lowercase , lowercase ) -> int:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase )
UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 3 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working, simple example of using Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training; it builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCAmelCase : Optional[Any] = 16
_UpperCAmelCase : Dict = 32
def A ( lowercase , lowercase = 16 ) -> List[str]:
'''simple docstring'''
UpperCamelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
UpperCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase = datasets.map(
lowercase , batched=lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase = 8
else:
UpperCamelCase = None
return tokenizer.pad(
lowercase , padding='longest' , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors='pt' , )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_UpperCAmelCase : Union[str, Any] = mocked_dataloaders # noqa: F811
def A ( lowercase , lowercase ) -> int:
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowercase ) == "1":
UpperCamelCase = 2
# Initialize accelerator
UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config['lr']
UpperCamelCase = int(config['num_epochs'] )
UpperCamelCase = int(config['seed'] )
UpperCamelCase = int(config['batch_size'] )
UpperCamelCase = evaluate.load('glue' , 'mrpc' )
# New Code #
    # We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowercase )
def inner_training_loop(lowercase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase = AdamW(params=model.parameters() , lr=lowercase )
UpperCamelCase , UpperCamelCase = get_dataloaders(lowercase , lowercase )
# Instantiate scheduler
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCamelCase = model(**lowercase )
UpperCamelCase = outputs.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase = model(**lowercase )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
UpperCamelCase , UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowercase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def A ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=lowercase , default=lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
UpperCamelCase = parser.parse_args()
UpperCamelCase = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
| 716 |
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self ) -> Optional[Any]:
"""simple docstring"""
        # run the self-test below to check that the subclass fulfills the Constraint contract
self.test()
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = False
while not completed:
if counter == 1:
self.reset()
UpperCamelCase = self.advance()
if not self.does_advance(A_ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Any:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
UpperCamelCase = token_ids
UpperCamelCase = len(self.token_ids )
UpperCamelCase = -1 # the index of the currently fulfilled step
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.fulfilled_idx += 1
UpperCamelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCamelCase = True
UpperCamelCase = completed
else:
# failed to make progress.
UpperCamelCase = True
self.reset()
return stepped, completed, reset
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = 0
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = PhrasalConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.fulfilled_idx
UpperCamelCase = self.completed
return new_constraint
class lowercase :
def __init__( self , A_ , A_=True ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = max([len(A_ ) for one in nested_token_ids] )
UpperCamelCase = {}
for token_ids in nested_token_ids:
UpperCamelCase = root
for tidx, token_id in enumerate(A_ ):
if token_id not in level:
UpperCamelCase = {}
UpperCamelCase = level[token_id]
if no_subsets and self.has_subsets(A_ , A_ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
UpperCamelCase = root
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.trie
for current_token in current_seq:
UpperCamelCase = start[current_token]
UpperCamelCase = list(start.keys() )
return next_tokens
def __UpperCamelCase ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.next_tokens(A_ )
return len(A_ ) == 0
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = list(root.values() )
if len(A_ ) == 0:
return 1
else:
return sum([self.count_leaves(A_ ) for nn in next_nodes] )
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.count_leaves(A_ )
return len(A_ ) != leaf_count
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
UpperCamelCase = DisjunctiveTrie(A_ )
UpperCamelCase = nested_token_ids
UpperCamelCase = self.trie.max_height
UpperCamelCase = []
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.trie.next_tokens(self.current_seq )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.current_seq.append(A_ )
UpperCamelCase = True
else:
UpperCamelCase = True
self.reset()
UpperCamelCase = self.trie.reached_leaf(self.current_seq )
UpperCamelCase = completed
return stepped, completed, reset
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = []
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
UpperCamelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.current_seq
UpperCamelCase = self.completed
return new_constraint
class lowercase :
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = constraints
# max # of steps required to fulfill a given constraint
UpperCamelCase = max([c.seqlen for c in constraints] )
UpperCamelCase = len(A_ )
UpperCamelCase = False
self.init_state()
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = None
UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints]
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCamelCase = constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
else:
UpperCamelCase = self.inprogress_constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Any:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCamelCase , UpperCamelCase = self.add(A_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
UpperCamelCase , UpperCamelCase = False, False
if self.completed:
UpperCamelCase = True
UpperCamelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                    # But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) )
UpperCamelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCamelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCamelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A_ ):
UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(A_ )
UpperCamelCase = None
if not complete and stepped:
UpperCamelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCamelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCamelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __UpperCamelCase ( self , A_=True ) -> Tuple:
"""simple docstring"""
        UpperCamelCase = ConstraintListState(self.constraints ) # we never mutate the self.constraints objects
        # themselves throughout this process, so they are still in their initialization state.
if stateful:
UpperCamelCase = [
constraint.copy(stateful=A_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ )
UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 3 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = pd.read_csv("sample_data.csv", header=None)
_UpperCAmelCase : int = df.shape[:1][0]
# If you're using some other dataset input the target column
_UpperCAmelCase : Tuple = df.iloc[:, 1:2]
_UpperCAmelCase : Tuple = actual_data.values.reshape(len_data, 1)
_UpperCAmelCase : str = MinMaxScaler().fit_transform(actual_data)
_UpperCAmelCase : Any = 10
_UpperCAmelCase : str = 5
_UpperCAmelCase : List[Any] = 20
_UpperCAmelCase : List[Any] = len_data - periods * look_back
_UpperCAmelCase : Optional[int] = actual_data[:division]
_UpperCAmelCase : Any = actual_data[division - look_back :]
_UpperCAmelCase : Any = [], []
_UpperCAmelCase : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_UpperCAmelCase : Union[str, Any] = np.array(train_x)
_UpperCAmelCase : Optional[int] = np.array(test_x)
_UpperCAmelCase : List[str] = np.array([list(i.ravel()) for i in train_y])
_UpperCAmelCase : Optional[int] = np.array([list(i.ravel()) for i in test_y])
_UpperCAmelCase : Dict = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
_UpperCAmelCase : Optional[Any] = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
_UpperCAmelCase : int = model.predict(x_test)
| 717 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self , A_ , A_ = None , A_ = None ) -> Any:
"""simple docstring"""
super().__init__()
UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase = torch.zeros(A_ , A_ )
else:
UpperCamelCase = None
UpperCamelCase = torch.nn.Parameter(A_ )
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : VQModel
__lowercase : CLIPTextModel
__lowercase : CLIPTokenizer
__lowercase : TransformeraDModel
__lowercase : LearnedClassifierFreeSamplingEmbeddings
__lowercase : VQDiffusionScheduler
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
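# --- Hedged, self-contained sketch (the helper and its toy values are hypothetical):
# the same truncation idea on a tiny log-probability tensor -- keep the most probable
# classes whose cumulative mass stays under the rate, zero the rest out in log space.
def _truncate_demo() -> None:
    import torch
    log_p = torch.log(torch.tensor([[[0.6], [0.3], [0.1]]]))
    sorted_log_p, idx = torch.sort(log_p, 1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < 0.8        # [True, False, False]
    keep = torch.cat((torch.full_like(keep[:, 0:1, :], True), keep), dim=1)[:, :-1, :]
    keep = keep.gather(1, idx.argsort(1))                     # back to original order
    out = log_p.clone()
    out[~keep] = -torch.inf                                   # only 0.6 and 0.3 survive
    assert torch.isinf(out[0, 2, 0]) and not torch.isinf(out[0, 0, 0])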
| 3 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def A ( lowercase ) -> str:
'''simple docstring'''
monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )
@pytest.fixture
def A ( lowercase ) -> Union[str, Any]:
'''simple docstring'''
class lowercase :
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = metric_id
class lowercase :
__lowercase : Any = [MetricMock(_SCREAMING_SNAKE_CASE ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self._metrics
monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )
@pytest.mark.parametrize(
'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
'''simple docstring'''
if "tmp_path" in args:
UpperCamelCase = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
    with pytest.warns(lowercase , match='https://huggingface.co/docs/evaluate' ):
func(*lowercase )
| 718 |
from string import ascii_uppercase
_UpperCAmelCase : Dict = {char: i for i, char in enumerate(ascii_uppercase)}
_UpperCAmelCase : Tuple = dict(enumerate(ascii_uppercase))
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = len(lowercase )
UpperCamelCase = 0
while True:
if x == i:
UpperCamelCase = 0
if len(lowercase ) == len(lowercase ):
break
key += key[i]
i += 1
return key
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
UpperCamelCase = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
UpperCamelCase = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
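# --- Hedged, self-contained round-trip sketch (helper name `_vigenere_demo` and its
# internals are a hypothetical re-implementation of the scheme above): subtracting
# the key letter encrypts, adding it back decrypts, so decryption inverts encryption.
def _vigenere_demo(message: str, key: str) -> None:
    pos = {char: i for i, char in enumerate(ascii_uppercase)}
    full_key = (key * len(message))[: len(message)]  # simple key extension
    enc, i = '', 0
    for letter in message:
        if letter == ' ':
            enc += ' '
            continue
        enc += ascii_uppercase[(pos[letter] - pos[full_key[i]]) % 26]
        i += 1
    dec, i = '', 0
    for letter in enc:
        if letter == ' ':
            dec += ' '
            continue
        dec += ascii_uppercase[(pos[letter] + pos[full_key[i]]) % 26]
        i += 1
    assert dec == message

_vigenere_demo('THE GERMAN ATTACK', 'SECRET')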
def A ( ) -> None:
'''simple docstring'''
UpperCamelCase = 'THE GERMAN ATTACK'
UpperCamelCase = 'SECRET'
UpperCamelCase = generate_key(lowercase , lowercase )
UpperCamelCase = cipher_text(lowercase , lowercase )
print(f'''Encrypted Text = {s}''' )
print(f'''Original Text = {original_text(lowercase , lowercase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 3 | 0 |
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = int(lowercase )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowercase )
UpperCamelCase , UpperCamelCase = divmod(lowercase , 2 )
return binary_recursive(lowercase ) + str(lowercase )
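# --- Hedged, self-contained check (helper name is hypothetical): the recursion above
# peels off the least significant bit with divmod(n, 2) and prepends the quotient's
# expansion, e.g. 10 -> (5, 0) -> (2, 1) -> (1, 0), read off as '1010'.
def _binary_demo(n: int) -> str:
    if n in (0, 1):
        return str(n)
    div, mod = divmod(n, 2)
    return _binary_demo(div) + str(mod)

assert _binary_demo(10) == '1010' and _binary_demo(7) == '111'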
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = str(lowercase ).strip()
if not number:
raise ValueError('No input value was provided' )
UpperCamelCase = '-' if number.startswith('-' ) else ''
UpperCamelCase = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return f'''{negative}0b{binary_recursive(int(lowercase ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 719 |
from collections.abc import Callable
def A ( lowercase , lowercase , lowercase ) -> float:
'''simple docstring'''
UpperCamelCase = a
UpperCamelCase = b
if function(lowercase ) == 0: # one of the a or b is a root for the function
return a
elif function(lowercase ) == 0:
return b
elif (
function(lowercase ) * function(lowercase ) > 0
    ):  # if neither endpoint is a root and f(a), f(b) share the same sign,
        # this algorithm cannot bracket a root in the given interval
raise ValueError('could not find root in given interval.' )
else:
UpperCamelCase = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # loop until the bracket narrows below 10^-7
if function(lowercase ) == 0:
return mid
elif function(lowercase ) * function(lowercase ) < 0:
UpperCamelCase = mid
else:
UpperCamelCase = mid
UpperCamelCase = start + (end - start) / 2.0
return mid
def A ( lowercase ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
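# --- Hedged, self-contained usage sketch (helper name is hypothetical): bisection only
# needs a sign change on [a, b]; here sqrt(2) is bracketed because g(1) < 0 < g(2).
def _bisect_demo(g, a: float, b: float, tol: float = 10**-7) -> float:
    assert g(a) * g(b) <= 0, 'root must be bracketed'
    while b - a > tol:
        mid = (a + b) / 2.0
        if g(a) * g(mid) <= 0:
            b = mid
        else:
            a = mid
    return (a + b) / 2.0

assert abs(_bisect_demo(lambda x: x * x - 2, 1.0, 2.0) - 2**0.5) < 10**-6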
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 3 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCAmelCase : int = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
import os
_UpperCAmelCase : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def A ( lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = 0
UpperCamelCase = 0
while index < len(lowercase ) - 1:
UpperCamelCase = SYMBOLS[numerals[index]]
UpperCamelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
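# --- Hedged, self-contained sketch of the subtractive rule above (helper name is
# hypothetical): a symbol smaller than its right neighbour is subtracted, otherwise
# added; a trailing 'I' sentinel makes the final symbol always additive.
def _parse_roman_demo(numerals: str) -> int:
    symbols = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1_000}
    total = 0
    for cur, nxt in zip(numerals, numerals[1:] + 'I'):
        total += -symbols[cur] if symbols[cur] < symbols[nxt] else symbols[cur]
    return total

assert _parse_roman_demo('XLIX') == 49 and _parse_roman_demo('MCMXCIV') == 1_994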
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = num // 1_000
numerals += m_count * "M"
num %= 1_000
UpperCamelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
UpperCamelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def A ( lowercase = "/p089_roman.txt" ) -> int:
'''simple docstring'''
UpperCamelCase = 0
with open(os.path.dirname(lowercase ) + roman_numerals_filename ) as filea:
UpperCamelCase = filea.readlines()
for line in lines:
UpperCamelCase = line.strip()
UpperCamelCase = parse_roman_numerals(lowercase )
UpperCamelCase = generate_roman_numerals(lowercase )
savings += len(lowercase ) - len(lowercase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 3 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : int = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = torch.device("cpu")
def A ( ) -> Dict:
'''simple docstring'''
UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def A ( lowercase ) -> List[Any]:
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def A ( lowercase , lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = dct.pop(lowercase )
UpperCamelCase = val
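# --- Hedged toy illustration (keys are hypothetical): the pop/assign idiom above
# moves a checkpoint entry to its new key without copying the tensor.
_toy_sd = {'old.key': 0}
_toy_sd['new.key'] = _toy_sd.pop('old.key')
assert _toy_sd == {'new.key': 0}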
def A ( lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = []
for k in state_dict.keys():
UpperCamelCase = k
if ".pwconv" in k:
UpperCamelCase = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
UpperCamelCase = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
UpperCamelCase = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
UpperCamelCase = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
UpperCamelCase = k_new.split('.' )
if ls[2].isdigit():
UpperCamelCase = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
UpperCamelCase = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def A ( lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
UpperCamelCase = 1_000
UpperCamelCase = 'huggingface/label-files'
UpperCamelCase = 'imagenet-1k-id2label.json'
UpperCamelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type='dataset' ) , 'r' ) )
UpperCamelCase = {int(lowercase ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCamelCase = [3, 3, 6, 4]
UpperCamelCase = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
UpperCamelCase = [3, 3, 9, 6]
UpperCamelCase = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
UpperCamelCase = [4, 3, 10, 5]
UpperCamelCase = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
UpperCamelCase = [4, 4, 12, 6]
UpperCamelCase = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
UpperCamelCase = torch.hub.load_state_dict_from_url(lowercase , map_location='cpu' , check_hash=lowercase )
else:
UpperCamelCase = torch.load(lowercase , map_location='cpu' )
UpperCamelCase = checkpoint
UpperCamelCase = create_rename_keys(lowercase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# load HuggingFace model
UpperCamelCase = SwiftFormerForImageClassification(lowercase ).eval()
hf_model.load_state_dict(lowercase )
# prepare test inputs
UpperCamelCase = prepare_img()
UpperCamelCase = ViTImageProcessor.from_pretrained('preprocessor_config' )
UpperCamelCase = processor(images=lowercase , return_tensors='pt' )
# compare outputs from both models
UpperCamelCase = get_expected_output(lowercase )
UpperCamelCase = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1e-3 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
_UpperCAmelCase : List[Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 721 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase )
UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCamelCase = dataset_size < in_memory_max_size
else:
UpperCamelCase = False
UpperCamelCase = is_small_dataset(lowercase )
assert result == expected
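# --- Hedged reference sketch (helper name `_is_small` is hypothetical) mirroring the
# expected-value logic above: a dataset counts as "small" only when both sizes are
# truthy and the dataset fits under the in-memory budget.
def _is_small(dataset_size, in_memory_max_size) -> bool:
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)

assert _is_small(400 * 2**20, 900 * 2**20)
assert not _is_small(600 * 2**20, 100 * 2**20)
assert not _is_small(None, 100 * 2**20)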
| 3 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
UpperCamelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert('RGB' )
UpperCamelCase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ),
] )
UpperCamelCase = transform(lowercase ).unsqueeze(0 ).to(lowercase )
return image
def A ( lowercase ) -> Union[str, Any]:
'''simple docstring'''
if "visual_encoder" in key:
UpperCamelCase = re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase )
if "blocks" in key:
UpperCamelCase = re.sub(R'blocks' , 'layers' , lowercase )
if "attn" in key:
UpperCamelCase = re.sub(R'attn' , 'self_attn' , lowercase )
if "norm1" in key:
UpperCamelCase = re.sub(R'norm1' , 'layer_norm1' , lowercase )
if "norm2" in key:
UpperCamelCase = re.sub(R'norm2' , 'layer_norm2' , lowercase )
if "encoder.norm" in key:
UpperCamelCase = re.sub(R'encoder.norm' , 'post_layernorm' , lowercase )
if "encoder.patch_embed.proj" in key:
UpperCamelCase = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase )
if "encoder.pos_embed" in key:
UpperCamelCase = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase )
if "encoder.cls_token" in key:
UpperCamelCase = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase )
if "self_attn" in key:
UpperCamelCase = re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase )
return key
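# --- Hedged toy check of the first few substitutions above (the key is hypothetical):
_demo_key = 'visual_encoder.blocks.0.attn.proj.weight'
_demo_key = re.sub('visual_encoder*', 'vision_model.encoder', _demo_key)
_demo_key = re.sub(R'blocks', 'layers', _demo_key)
_demo_key = re.sub(R'attn', 'self_attn', _demo_key)
assert _demo_key == 'vision_model.encoder.layers.0.self_attn.proj.weight'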
@torch.no_grad()
def A ( lowercase , lowercase=None ) -> int:
'''simple docstring'''
if config_path is not None:
UpperCamelCase = BlipConfig.from_pretrained(lowercase )
else:
UpperCamelCase = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
UpperCamelCase = BlipForConditionalGeneration(lowercase ).eval()
UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
UpperCamelCase = blip_decoder(pretrained=lowercase , image_size=384 , vit='base' )
UpperCamelCase = pt_model.eval()
UpperCamelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCamelCase = modified_state_dict.pop(lowercase )
UpperCamelCase = rename_key(lowercase )
UpperCamelCase = value
hf_model.load_state_dict(lowercase )
UpperCamelCase = 384
UpperCamelCase = load_demo_image(image_size=lowercase , device='cpu' )
UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCamelCase = tokenizer(['a picture of'] ).input_ids
UpperCamelCase = hf_model.generate(lowercase , lowercase )
assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
UpperCamelCase = hf_model.generate(lowercase )
assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCamelCase = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
UpperCamelCase = blip_vqa(pretrained=lowercase , image_size=lowercase , vit='base' )
vqa_model.eval()
UpperCamelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCamelCase = modified_state_dict.pop(lowercase )
UpperCamelCase = rename_key(lowercase )
UpperCamelCase = value
UpperCamelCase = BlipForQuestionAnswering(lowercase )
hf_vqa_model.load_state_dict(lowercase )
UpperCamelCase = ['How many dogs are in this image?']
UpperCamelCase = tokenizer(lowercase , return_tensors='pt' ).input_ids
UpperCamelCase = hf_vqa_model.generate(lowercase , lowercase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
UpperCamelCase = blip_itm(pretrained=lowercase , image_size=lowercase , vit='base' )
itm_model.eval()
UpperCamelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCamelCase = modified_state_dict.pop(lowercase )
UpperCamelCase = rename_key(lowercase )
UpperCamelCase = value
UpperCamelCase = BlipForImageTextRetrieval(lowercase )
UpperCamelCase = ['A picture of a woman with a dog sitting in a beach']
UpperCamelCase = tokenizer(
lowercase , return_tensors='pt' , padding='max_length' , truncation=lowercase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowercase )
hf_itm_model.eval()
UpperCamelCase = hf_itm_model(lowercase , lowercase , use_itm_head=lowercase )
UpperCamelCase = hf_itm_model(lowercase , lowercase , use_itm_head=lowercase )
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
_UpperCAmelCase : Optional[int] = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 700 |
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be non-negative' )
UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b"
UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b"
UpperCamelCase = max(len(lowercase ) , len(lowercase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) )
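# --- Hedged cross-check (self-contained): Python's built-in ^ agrees with the
# character-wise construction above, e.g. 25 (0b11001) XOR 32 (0b100000) = 0b111001.
assert format(25 ^ 32, '#b') == '0b111001'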
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(lowercase ):
for j in range(lowercase ):
UpperCamelCase = [255, 255, 255] - img[i][j]
return img
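# --- Hedged vectorized equivalent (self-contained; assumes a uint8 image array):
# the double loop above is just the elementwise complement 255 - img.
import numpy as np
_px = np.array([10, 200, 0], dtype=np.uint8)
assert ((255 - _px) == np.array([245, 55, 255], dtype=np.uint8)).all()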
if __name__ == "__main__":
# read original image
_UpperCAmelCase : Tuple = imread("image_data/lena.jpg", 1)
# convert to its negative
_UpperCAmelCase : Tuple = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 701 |
import re
def A ( lowercase ) -> str:
'''simple docstring'''
if len(re.findall('[ATCG]' , lowercase ) ) != len(lowercase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
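# --- Hedged worked example: maketrans pairs each base with its complement
# (A<->T, C<->G), so the complement of "GCAT" is "CGTA".
assert 'GCAT'.translate(str.maketrans('ATCG', 'TAGC')) == 'CGTA'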
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
from collections import defaultdict
def A ( lowercase , lowercase ) -> bool:
'''simple docstring'''
UpperCamelCase = first_str.lower().strip()
UpperCamelCase = second_str.lower().strip()
# Remove whitespace
UpperCamelCase = first_str.replace(' ' , '' )
UpperCamelCase = second_str.replace(' ' , '' )
# Strings of different lengths are not anagrams
if len(lowercase ) != len(lowercase ):
return False
# Default values for count should be 0
UpperCamelCase = defaultdict(lowercase )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(lowercase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
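# --- Hedged cross-check (self-contained): the +1/-1 counter above is equivalent to
# comparing sorted letter sequences.
assert sorted('silent') == sorted('listen')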
if __name__ == "__main__":
from doctest import testmod
testmod()
_UpperCAmelCase : int = input("Enter the first string ").strip()
_UpperCAmelCase : Optional[Any] = input("Enter the second string ").strip()
_UpperCAmelCase : List[Any] = check_anagrams(input_a, input_b)
print(F'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 702 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = (DDPMScheduler,)
def __UpperCamelCase ( self , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**A_ )
return config
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
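        # Hedged reference for the asserts above (standard DDPM posterior variance with
        # variance_type='fixed_small'): beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t,
        # which is 0 at t = 0 and approaches beta_t (here beta_end = 0.02) near t = 999.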
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
UpperCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(A_ ) )
UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' )
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
UpperCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(A_ ) )
UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A_ )
UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(A_ ):
if i == len(A_ ) - 1:
UpperCamelCase = -1
else:
UpperCamelCase = timesteps[i + 1]
UpperCamelCase = scheduler.previous_timestep(A_ )
UpperCamelCase = prev_t.item()
self.assertEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(A_ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 1, 0]
UpperCamelCase = len(A_ )
with self.assertRaises(A_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            A_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=A_ )
| 3 | 0 |
from PIL import Image
def A ( lowercase , lowercase ) -> Image:
'''simple docstring'''
UpperCamelCase = (259 * (level + 255)) / (255 * (259 - level))
def contrast(lowercase ) -> int:
return int(128 + factor * (c - 128) )
return img.point(lowercase )
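# --- Hedged worked values for level = 170 (as used below): factor =
# (259 * 425) / (255 * 89) = 110075 / 22695 ~= 4.85, so a mid-grey pixel c = 128 is a
# fixed point, while c = 150 maps to int(128 + 4.85 * 22) = 234.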
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
_UpperCAmelCase : Dict = change_contrast(img, 170)
cont_img.save("image_data/lena_high_contrast.png", format="png")
| 703 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : List[str] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase : Optional[int] = {
"camembert-base": 512,
}
_UpperCAmelCase : Union[str, Any] = "▁"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ["input_ids", "attention_mask"]
__lowercase : Tuple = CamembertTokenizer
def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
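    # Hedged layout note: for a sequence pair the method above produces
    # <s> A </s></s> B </s>, the RoBERTa-style pair encoding that CamemBERT inherits.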
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
| 3 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : List[str] = AudioLDMPipeline
__lowercase : Optional[Any] = TEXT_TO_AUDIO_PARAMS
__lowercase : Tuple = TEXT_TO_AUDIO_BATCH_PARAMS
__lowercase : Dict = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=A_ , )
UpperCamelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=A_ , set_alpha_to_one=A_ , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
UpperCamelCase = ClapTextModelWithProjection(A_ )
UpperCamelCase = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77 )
UpperCamelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=A_ , )
UpperCamelCase = SpeechTaHifiGan(A_ )
UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def __UpperCamelCase ( self , A_ , A_=0 ) -> Dict:
"""simple docstring"""
if str(A_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(A_ )
else:
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) == 256
UpperCamelCase = audio[:10]
UpperCamelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * [inputs['prompt']]
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * [inputs.pop('prompt' )]
UpperCamelCase = audioldm_pipe.tokenizer(
A_ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = text_inputs['input_ids'].to(A_ )
UpperCamelCase = audioldm_pipe.text_encoder(
A_ , )
UpperCamelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase = F.normalize(A_ , dim=-1 )
UpperCamelCase = prompt_embeds
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * ['this is a negative prompt']
UpperCamelCase = negative_prompt
UpperCamelCase = 3 * [inputs['prompt']]
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 3 * [inputs.pop('prompt' )]
UpperCamelCase = []
for p in [prompt, negative_prompt]:
UpperCamelCase = audioldm_pipe.tokenizer(
A_ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = text_inputs['input_ids'].to(A_ )
UpperCamelCase = audioldm_pipe.text_encoder(
A_ , )
UpperCamelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase = F.normalize(A_ , dim=-1 )
embeds.append(A_ )
UpperCamelCase , UpperCamelCase = embeds
# forward
UpperCamelCase = audioldm_pipe(**A_ )
UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ )
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = 'egg cracking'
UpperCamelCase = audioldm_pipe(**A_ , negative_prompt=A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) == 256
UpperCamelCase = audio[:10]
UpperCamelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ )
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=2 , num_waveforms_per_prompt=A_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=A_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = audioldm_pipe.vocoder.config.sampling_rate
UpperCamelCase = self.get_dummy_inputs(A_ )
UpperCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) / vocoder_sampling_rate == 0.016
UpperCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **A_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) / vocoder_sampling_rate == 0.032
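        # Hedged arithmetic behind the two asserts above: with the 16 kHz dummy vocoder,
        # 0.016 s -> 256 samples and 0.032 s -> 512 samples, so
        # len(audio) / sampling_rate recovers the requested duration exactly.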
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**A_ )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = ['hey']
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=1 )
UpperCamelCase = output.audios.shape
assert audio_shape == (1, 256)
UpperCamelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCamelCase = SpeechTaHifiGan(A_ ).to(A_ )
UpperCamelCase = audioldm_pipe(A_ , num_inference_steps=1 )
UpperCamelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ )
@slow
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self , A_ , A_="cpu" , A_=torch.floataa , A_=0 ) -> List[str]:
"""simple docstring"""
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) )
UpperCamelCase = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ )
UpperCamelCase = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_inputs(A_ )
UpperCamelCase = 25
UpperCamelCase = audioldm_pipe(**A_ ).audios[0]
assert audio.ndim == 1
assert len(A_ ) == 81_920
UpperCamelCase = audio[77_230:77_240]
UpperCamelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
UpperCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCamelCase = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = self.get_inputs(A_ )
UpperCamelCase = audioldm_pipe(**A_ ).audios[0]
assert audio.ndim == 1
assert len(A_ ) == 81_920
UpperCamelCase = audio[27_780:27_790]
UpperCamelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Union[str, Any] = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def A ( lowercase , lowercase ) -> List[str]:
'''simple docstring'''
return float((preds == labels).mean() )
def A ( lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = simple_accuracy(lowercase , lowercase )
UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] )
UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
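# --- Hedged sanity check (self-contained): perfectly linear, identical inputs give
# both correlations of 1.0, matching the 'stsb' docstring example above.
assert abs(float(pearsonr([0.0, 1.0, 2.0], [0.0, 1.0, 2.0])[0]) - 1.0) < 1e-12
assert abs(float(spearmanr([0.0, 1.0, 2.0], [0.0, 1.0, 2.0])[0]) - 1.0) < 1e-12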
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def __UpperCamelCase ( self , A_ , A_ ) -> Any:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(A_ , A_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(A_ , A_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(A_ , A_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(A_ , A_ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = "data2vec-text"
def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class lowercase ( _SCREAMING_SNAKE_CASE ):
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
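        # Hedged aside: the dynamic axes above tell the ONNX exporter which input
        # dimensions (batch, optional choice, sequence) may vary at export/inference time.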
| 3 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def A ( lowercase = True , *lowercase , **lowercase ) -> int:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
UpperCamelCase = False
if main_process_only:
UpperCamelCase = PartialState().local_process_index == 0
return _tqdm(*lowercase , **lowercase , disable=lowercase )
| 706 |
from random import shuffle
import tensorflow as tf
from numpy import array
def A ( lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = int(lowercase )
assert noofclusters < len(lowercase )
# Find out the dimensionality
UpperCamelCase = len(vectors[0] )
# Will help select random centroids from among the available vectors
UpperCamelCase = list(range(len(lowercase ) ) )
shuffle(lowercase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
UpperCamelCase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
UpperCamelCase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
UpperCamelCase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase )
]
##These nodes will assign the centroid Variables the appropriate
##values
UpperCamelCase = tf.placeholder('float64' , [dim] )
UpperCamelCase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase , lowercase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
UpperCamelCase = [tf.Variable(0 ) for i in range(len(lowercase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
UpperCamelCase = tf.placeholder('int32' )
UpperCamelCase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase , lowercase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
UpperCamelCase = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
UpperCamelCase = tf.reduce_mean(lowercase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
UpperCamelCase = tf.placeholder('float' , [dim] )
UpperCamelCase = tf.placeholder('float' , [dim] )
UpperCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase , lowercase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
UpperCamelCase = tf.placeholder('float' , [noofclusters] )
UpperCamelCase = tf.argmin(lowercase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        UpperCamelCase = tf.global_variables_initializer()
# Initialize all variables
sess.run(lowercase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
UpperCamelCase = 100
for _ in range(lowercase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase ) ):
UpperCamelCase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
UpperCamelCase = [
sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
UpperCamelCase = sess.run(
lowercase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase ):
# Collect all the vectors assigned to this cluster
UpperCamelCase = [
vectors[i]
for i in range(len(lowercase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
UpperCamelCase = sess.run(
lowercase , feed_dict={mean_input: array(lowercase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
UpperCamelCase = sess.run(lowercase )
UpperCamelCase = sess.run(lowercase )
return centroids, assignments
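# Usage sketch for the clustering helper above (assumption: a TensorFlow 1.x graph-mode
# runtime and tiny 2-D data, purely for illustration):
#
#   vectors = [array([1.0, 1.0]), array([1.2, 0.8]), array([8.0, 8.0]), array([7.9, 8.2])]
#   centroids, assignments = A(vectors, 2)
#
# `assignments[i]` is the index of the centroid vector i belongs to after the 100
# expectation-maximization iterations.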
| 3 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_UpperCAmelCase : Any = "scheduler_config.json"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : str = 1
__lowercase : Dict = 2
__lowercase : Dict = 3
__lowercase : Union[str, Any] = 4
__lowercase : str = 5
__lowercase : Optional[int] = 6
__lowercase : Dict = 7
__lowercase : Dict = 8
__lowercase : Optional[Any] = 9
__lowercase : List[Any] = 10
__lowercase : str = 11
__lowercase : Tuple = 12
__lowercase : List[Any] = 13
__lowercase : List[Any] = 14
@dataclass
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : torch.FloatTensor
class lowercase :
__lowercase : Optional[int] = SCHEDULER_CONFIG_NAME
__lowercase : Optional[int] = []
__lowercase : str = True
@classmethod
def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Dict:
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , return_commit_hash=A_ , **A_ , )
return cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> Tuple:
"""simple docstring"""
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def __UpperCamelCase ( cls ) -> int:
"""simple docstring"""
UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase = importlib.import_module(__name__.split('.' )[0] )
UpperCamelCase = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
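# Sketch of the name resolution performed above (assumption: illustrative scheduler
# names): with `_compatibles = ['DDIMScheduler', 'PNDMScheduler']`, the candidate list
# is the class's own name plus those strings; each is looked up on the top-level
# package module, and only attributes that actually exist there are returned as
# class objects.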
| 707 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_UpperCAmelCase : Tuple = _symbol_database.Default()
_UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_UpperCAmelCase : int = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_UpperCAmelCase : int = None
_UpperCAmelCase : List[str] = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_UpperCAmelCase : Optional[Any] = 45
_UpperCAmelCase : Any = 1_581
_UpperCAmelCase : Tuple = 1_517
_UpperCAmelCase : List[str] = 1_570
_UpperCAmelCase : int = 1_584
_UpperCAmelCase : List[Any] = 1_793
_UpperCAmelCase : Optional[int] = 1_795
_UpperCAmelCase : Any = 1_916
_UpperCAmelCase : Tuple = 1_864
_UpperCAmelCase : List[Any] = 1_905
_UpperCAmelCase : Union[str, Any] = 1_919
_UpperCAmelCase : str = 2_429
_UpperCAmelCase : Any = 2_208
_UpperCAmelCase : Dict = 2_418
_UpperCAmelCase : Optional[Any] = 2_323
_UpperCAmelCase : Tuple = 2_407
# @@protoc_insertion_point(module_scope)
| 3 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_UpperCAmelCase : Tuple = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
_UpperCAmelCase : Dict = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_UpperCAmelCase : Optional[Any] = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
_UpperCAmelCase : List[Any] = sorted(arg_to_scheduler.keys())
_UpperCAmelCase : List[Any] = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class lowercase ( pl.LightningModule ):
def __init__( self , A_ , A_=None , A_="base" , A_=None , A_=None , A_=None , **A_ , ) -> str:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(A_ )
UpperCamelCase = 0
UpperCamelCase = Path(self.hparams.output_dir )
UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
UpperCamelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=A_ , **A_ , )
else:
UpperCamelCase = config
UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , A_ , A_ ):
assert hasattr(self.config , A_ ), F'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , A_ , getattr(self.hparams , A_ ) )
if tokenizer is None:
UpperCamelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=A_ , )
else:
UpperCamelCase = tokenizer
UpperCamelCase = MODEL_MODES[mode]
if model is None:
UpperCamelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=A_ , )
else:
UpperCamelCase = model
def __UpperCamelCase ( self , *A_ , **A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model_type.from_pretrained(*A_ , **A_ )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler]
UpperCamelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.model
UpperCamelCase = ['bias', 'LayerNorm.weight']
UpperCamelCase = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
UpperCamelCase = Adafactor(
A_ , lr=self.hparams.learning_rate , scale_parameter=A_ , relative_step=A_ )
else:
UpperCamelCase = AdamW(
A_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
UpperCamelCase = optimizer
UpperCamelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __UpperCamelCase ( self , A_ , A_ ) -> str:
"""simple docstring"""
return self.validation_step(A_ , A_ )
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
return self.validation_end(A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
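    # Worked example for the arithmetic above (illustrative numbers): with
    # train_batch_size=32, accumulate_grad_batches=2 and gpus=4, the effective batch
    # size is 32 * 2 * 4 = 256; a dataset of 25_600 examples trained for max_epochs=3
    # then yields (25_600 / 256) * 3 = 300 total optimizer steps.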
def __UpperCamelCase ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
if stage == "test":
UpperCamelCase = len(self.test_dataloader().dataset )
else:
UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=A_ )
UpperCamelCase = len(self.train_dataloader().dataset )
def __UpperCamelCase ( self , A_ , A_ , A_ = False ) -> List[str]:
"""simple docstring"""
raise NotImplementedError('You must implement this for your task' )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self.train_loader
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=A_ )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
A_ , list(filter(A_ , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
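    # Example of the cached-feature path built above (illustrative values):
    # data_dir='data', mode='train', model_name_or_path='bert-base-cased' and
    # max_seq_length=128 produce 'data/cached_train_bert-base-cased_128'.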
@pl.utilities.rank_zero_only
def __UpperCamelCase ( self , A_ ) -> None:
"""simple docstring"""
UpperCamelCase = self.output_dir.joinpath('best_tfmr' )
UpperCamelCase = self.step_count
self.model.save_pretrained(A_ )
self.tokenizer.save_pretrained(A_ )
@staticmethod
def __UpperCamelCase ( A_ , A_ ) -> Any:
"""simple docstring"""
parser.add_argument(
'--model_name_or_path' , default=A_ , type=A_ , required=A_ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=A_ , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=A_ , type=A_ , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(A_ ).parent / 'test_run' / 'cache' ) , type=A_ , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=A_ , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=A_ , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=A_ , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=A_ , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5e-5 , type=A_ , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=A_ , metavar=A_ , type=A_ , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=A_ , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=A_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=A_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=A_ , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=A_ )
parser.add_argument('--train_batch_size' , default=32 , type=A_ )
parser.add_argument('--eval_batch_size' , default=32 , type=A_ )
parser.add_argument('--adafactor' , action='store_true' )
class lowercase ( pl.Callback ):
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowercase ( pl.Callback ):
def __UpperCamelCase ( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(A_ )
class lowercase ( pl.Callback ):
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = trainer.lr_schedulers[0]['scheduler']
UpperCamelCase = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(A_ )
def __UpperCamelCase ( self , A_ , A_ ) -> Dict:
"""simple docstring"""
rank_zero_info('***** Validation results *****' )
UpperCamelCase = trainer.callback_metrics
# Log results
for key in sorted(A_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(A_ , str(metrics[key] ) ) )
def __UpperCamelCase ( self , A_ , A_ ) -> str:
"""simple docstring"""
rank_zero_info('***** Test results *****' )
UpperCamelCase = trainer.callback_metrics
# Log and save results to file
UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(A_ , 'w' ) as writer:
for key in sorted(A_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(A_ , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(A_ , str(metrics[key] ) ) )
def A ( lowercase , lowercase ) -> None:
'''simple docstring'''
parser.add_argument(
'--output_dir' , default=str(Path(lowercase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowercase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowercase , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowercase )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowercase , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowercase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=lowercase , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(lowercase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowercase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def A ( lowercase , lowercase , lowercase=None , lowercase=True , lowercase=[] , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
'''simple docstring'''
pl.seed_everything(args.seed )
# init model
UpperCamelCase = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowercase )
# add custom checkpoints
if checkpoint_callback is None:
UpperCamelCase = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowercase )
if logging_callback is None:
UpperCamelCase = LoggingCallback()
UpperCamelCase = {}
if args.fpaa:
UpperCamelCase = 16
if args.gpus > 1:
UpperCamelCase = 'auto'
UpperCamelCase = 'ddp'
UpperCamelCase = args.accumulate_grad_batches
UpperCamelCase = None
UpperCamelCase = 'auto'
UpperCamelCase = pl.Trainer.from_argparse_args(
lowercase , weights_summary=lowercase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowercase , val_check_interval=1 , num_sanity_val_steps=2 , **lowercase , )
if args.do_train:
trainer.fit(lowercase )
else:
        print('RAG modeling tests with new set functions successfully executed!' )
return trainer
| 708 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase = tempfile.mktemp()
with open(A_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ )
UpperCamelCase = AlbertTokenizer.from_pretrained(A_ )
finally:
os.remove(A_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ )
UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase ( unittest.TestCase ):
__lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __UpperCamelCase ( cls ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def __UpperCamelCase ( cls ) -> Optional[int]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = CustomTokenizer(A_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizerFast.from_pretrained(A_ )
bert_tokenizer.save_pretrained(A_ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase = Trie()
UpperCamelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A_ , ['AB', 'C'] )
| 3 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_UpperCAmelCase : Tuple = _symbol_database.Default()
_UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_UpperCAmelCase : int = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_UpperCAmelCase : int = None
_UpperCAmelCase : List[str] = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_UpperCAmelCase : Optional[Any] = 45
_UpperCAmelCase : Any = 1_581
_UpperCAmelCase : Tuple = 1_517
_UpperCAmelCase : List[str] = 1_570
_UpperCAmelCase : int = 1_584
_UpperCAmelCase : List[Any] = 1_793
_UpperCAmelCase : Optional[int] = 1_795
_UpperCAmelCase : Any = 1_916
_UpperCAmelCase : Tuple = 1_864
_UpperCAmelCase : List[Any] = 1_905
_UpperCAmelCase : Union[str, Any] = 1_919
_UpperCAmelCase : str = 2_429
_UpperCAmelCase : Any = 2_208
_UpperCAmelCase : Dict = 2_418
_UpperCAmelCase : Optional[Any] = 2_323
_UpperCAmelCase : Tuple = 2_407
# @@protoc_insertion_point(module_scope)
| 709 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(lowercase , lowercase ):
UpperCamelCase = parquet_path
elif issubclass(lowercase , lowercase ):
UpperCamelCase = [parquet_path]
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
for split in splits:
UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if split:
UpperCamelCase = {split: parquet_path}
else:
UpperCamelCase = 'train'
UpperCamelCase = {'train': parquet_path, 'test': parquet_path}
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' )
UpperCamelCase = pf.read()
assert dataset.data.table == output_table
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' )
UpperCamelCase = {'image': [image_path]}
UpperCamelCase = Features({'image': Image()} )
UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase )
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
assert get_writer_batch_size(lowercase ) == expected
| 3 | 0 |
import numpy
# List of input, output pairs
_UpperCAmelCase : Optional[int] = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
_UpperCAmelCase : List[Any] = (((515, 22, 13), 555), ((61, 35, 49), 150))
_UpperCAmelCase : Union[str, Any] = [2, 4, 1, 5]
_UpperCAmelCase : Tuple = len(train_data)
_UpperCAmelCase : Tuple = 0.009
def A ( lowercase , lowercase="train" ) -> Optional[Any]:
'''simple docstring'''
return calculate_hypothesis_value(lowercase , lowercase ) - output(
lowercase , lowercase )
def A ( lowercase ) -> List[str]:
'''simple docstring'''
UpperCamelCase = 0
    for i in range(len(lowercase ) ):  # iterate over every feature; the bias term is added below
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
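# Worked example (hand-checked against the loop above): with the parameter vector
# [2, 4, 1, 5] and the training input (5, 2, 3), the hypothesis value is
# 2 + 4*5 + 1*2 + 5*3 = 39.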
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def A ( lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def A ( lowercase , lowercase=m ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = 0
for i in range(lowercase ):
if index == -1:
summation_value += _error(lowercase )
else:
summation_value += _error(lowercase ) * train_data[i][0][index]
return summation_value
def A ( lowercase ) -> List[str]:
'''simple docstring'''
UpperCamelCase = summation_of_cost_derivative(lowercase , lowercase ) / m
return cost_derivative_value
def A ( ) -> Optional[Any]:
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCamelCase = 0.0_0_0_0_0_2
UpperCamelCase = 0
UpperCamelCase = 0
while True:
j += 1
UpperCamelCase = [0, 0, 0, 0]
for i in range(0 , len(lowercase ) ):
UpperCamelCase = get_cost_derivative(i - 1 )
UpperCamelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowercase , lowercase , atol=lowercase , rtol=lowercase , ):
break
UpperCamelCase = temp_parameter_vector
print(('Number of iterations:', j) )
def A ( ) -> Any:
'''simple docstring'''
for i in range(len(lowercase ) ):
print(('Actual output value:', output(lowercase , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(lowercase , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Tuple:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size if size is not None else {'height': 18, 'width': 20}
UpperCamelCase = do_thumbnail
UpperCamelCase = do_align_axis
UpperCamelCase = do_pad
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Optional[int] = DonutImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = DonutImageProcessingTester(self )
@property
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_thumbnail' ) )
self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 3 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 711 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_UpperCAmelCase : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_UpperCAmelCase : List[str] = {"facebook/blenderbot_small-90M": 512}
def A ( lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
UpperCamelCase = set(lowercase )
return pairs
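# Illustrative note for the pair helper above: given the word tuple
# ('h', 'e', 'l', 'l', 'o') it returns {('h', 'e'), ('e', 'l'), ('l', 'l'),
# ('l', 'o')}, the set of adjacent symbol pairs that BPE merging ranks.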
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ )
with open(A_ , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(A_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
UpperCamelCase = merges_handle.read().split('\n' )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in merges]
UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase = {}
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = re.sub('([.,!?()])' , r' \1' , A_ )
UpperCamelCase = re.sub('(\')' , r' \1 ' , A_ )
UpperCamelCase = re.sub(r'\s{2,}' , ' ' , A_ )
if "\n" in token:
UpperCamelCase = token.replace('\n' , ' __newln__' )
UpperCamelCase = token.split(' ' )
UpperCamelCase = []
for token in tokens:
if not len(A_ ):
continue
UpperCamelCase = token.lower()
UpperCamelCase = tuple(A_ )
UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
UpperCamelCase = get_pairs(A_ )
if not pairs:
words.append(A_ )
continue
while True:
UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(A_ ):
try:
UpperCamelCase = word.index(A_ , A_ )
new_word.extend(word[i:j] )
UpperCamelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(A_ )
UpperCamelCase = new_word
if len(A_ ) == 1:
break
else:
UpperCamelCase = get_pairs(A_ )
UpperCamelCase = '@@ '.join(A_ )
UpperCamelCase = word[:-4]
UpperCamelCase = word
words.append(A_ )
return " ".join(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = re.findall(r'\S+\n?' , A_ )
for token in words:
split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) )
return split_tokens
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
UpperCamelCase = token.lower()
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = ' '.join(A_ ).replace('@@ ' , '' ).strip()
return out_string
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
UpperCamelCase = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : A_[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
UpperCamelCase = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
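# Minimal usage sketch, assuming a local `vocab.json` and `merges.txt` exist
# (file names are hypothetical; the class is named `lowercase` in this corpus):
#   tok = lowercase(vocab_file="vocab.json", merges_file="merges.txt")
#   tok.tokenize("hello world")  # -> BPE pieces produced by the merges above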
| 3 | 0 |
import os
import sys
import unittest
_UpperCAmelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_UpperCAmelCase : str = os.path.join(git_repo_path, "src", "transformers")
_UpperCAmelCase : Union[str, Any] = "\n{0} = None\n"
_UpperCAmelCase : List[Any] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
_UpperCAmelCase : Optional[int] = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(A_ )
UpperCamelCase = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(A_ , 'tokenizers' )
UpperCamelCase = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(A_ , 'tensorflow_text' )
UpperCamelCase = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(A_ , 'sentencepiece_and_tokenizers' )
UpperCamelCase = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(A_ , 'sentencepiece_and_tensorflow_text' )
UpperCamelCase = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(A_ , 'sentencepiece_and_tokenizers_and_vision' )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('torch' , A_ )
self.assertIn('tensorflow_text' , A_ )
self.assertIn('sentencepiece_and_tokenizers' , A_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(A_ , '\nCONSTANT = None\n' )
UpperCamelCase = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
A_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
UpperCamelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
UpperCamelCase = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
UpperCamelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , A_ )
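# A minimal, self-contained sketch (hypothetical `_demo_find_backend`, not part
# of check_dummies) of the kind of parsing exercised above: pull backend names
# out of lines such as `if not is_tokenizers_available():` and join multiple
# hits with "_and_".
import re as _re

def _demo_find_backend(line):
    # e.g. "is_tokenizers_available()" -> captures "tokenizers"
    hits = _re.findall(r"is_([a-z_]+)_available\(\)", line)
    return "_and_".join(hits) if hits else None

# _demo_find_backend("    if not is_tokenizers_available():") == "tokenizers"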
| 712 |
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = int(lowercase )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowercase )
UpperCamelCase , UpperCamelCase = divmod(lowercase , 2 )
return binary_recursive(lowercase ) + str(lowercase )
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = str(lowercase ).strip()
if not number:
raise ValueError('No input value was provided' )
UpperCamelCase = '-' if number.startswith('-' ) else ''
UpperCamelCase = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return f'''{negative}0b{binary_recursive(int(lowercase ) )}'''
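# A self-contained sketch (hypothetical `_demo_decimal_to_binary`) mirroring the
# conversion above without the corpus renaming, so it can be run directly:
def _demo_decimal_to_binary(n):
    sign = "-" if n < 0 else ""
    return f"{sign}0b{bin(abs(n))[2:]}"

# _demo_decimal_to_binary(-12) == "-0b1100"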
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def A ( lowercase , lowercase , lowercase , lowercase ) -> str:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def A ( lowercase , lowercase , lowercase , lowercase , lowercase=True ) -> List[Any]:
'''simple docstring'''
model.train()
UpperCamelCase = model(lowercase )
UpperCamelCase = F.mse_loss(lowercase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowercase )
def A ( lowercase , lowercase=False ) -> Union[str, Any]:
'''simple docstring'''
set_seed(42 )
UpperCamelCase = RegressionModel()
UpperCamelCase = deepcopy(lowercase )
UpperCamelCase = RegressionDataset(length=80 )
UpperCamelCase = DataLoader(lowercase , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCamelCase = AdamW(params=model.parameters() , lr=1e-3 )
UpperCamelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        UpperCamelCase = LambdaLR(lowercase , lr_lambda=lambda lowercase : lowercase**0.6_5 )
        UpperCamelCase = LambdaLR(lowercase , lr_lambda=lambda lowercase : lowercase**0.6_5 )
# Make a copy of `model`
if sched:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(lowercase , lowercase , lowercase , lowercase )
else:
UpperCamelCase , UpperCamelCase = accelerator.prepare(lowercase , lowercase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase )
# Use a single batch
UpperCamelCase , UpperCamelCase = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
UpperCamelCase = ddp_input[torch.randperm(len(lowercase ) )]
def A ( lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase )
# Use a single batch
UpperCamelCase , UpperCamelCase = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
UpperCamelCase = ddp_input[torch.randperm(len(lowercase ) )]
def A ( lowercase=False , lowercase=False ) -> str:
'''simple docstring'''
UpperCamelCase = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase )
for iteration, batch in enumerate(lowercase ):
UpperCamelCase , UpperCamelCase = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
UpperCamelCase = ddp_input[torch.randperm(len(lowercase ) )]
GradientState._reset_state()
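# Plain-PyTorch sketch (no Accelerate, hypothetical names) of the accumulation
# pattern the test above exercises: gradients sum locally across batches and the
# optimizer only steps every `accum_steps` iterations.
#   for step, (x, y) in enumerate(loader):
#       loss = F.mse_loss(model(x), y) / accum_steps
#       loss.backward()
#       if (step + 1) % accum_steps == 0:
#           optimizer.step()
#           optimizer.zero_grad()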
def A ( lowercase=False , lowercase=False ) -> List[str]:
'''simple docstring'''
UpperCamelCase = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase , lowercase )
for iteration, batch in enumerate(lowercase ):
UpperCamelCase , UpperCamelCase = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
UpperCamelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase ))
if accelerator.num_processes > 1:
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def A ( ) -> int:
'''simple docstring'''
UpperCamelCase = Accelerator()
UpperCamelCase = RegressionDataset(length=80 )
UpperCamelCase = DataLoader(lowercase , batch_size=16 )
UpperCamelCase = RegressionDataset(length=96 )
UpperCamelCase = DataLoader(lowercase , batch_size=16 )
UpperCamelCase , UpperCamelCase = accelerator.prepare(lowercase , lowercase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if iteration < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if batch_num < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def A ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = Accelerator()
UpperCamelCase = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(lowercase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(lowercase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(lowercase , lowercase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase )
def A ( lowercase ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
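# Illustrative renaming performed via MAPPING, with the "*" wildcard filled in
# by the layer index: a fairseq key such as
#   "encoder.layers.3.self_attn.linear_q.weight"
# becomes
#   "wav2vec2_conformer.encoder.layers.3.self_attn.linear_q.weight"
# on the Hugging Face side (the prefix is added below for non-top-level keys).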
_UpperCAmelCase : Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
for attribute in key.split('.' ):
UpperCamelCase = getattr(lowercase , lowercase )
if weight_type is not None:
UpperCamelCase = getattr(lowercase , lowercase ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
elif weight_type == "running_mean":
UpperCamelCase = value
elif weight_type == "running_var":
UpperCamelCase = value
elif weight_type == "num_batches_tracked":
UpperCamelCase = value
elif weight_type == "inv_freq":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A ( lowercase , lowercase , lowercase ) -> Any:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(lowercase )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , lowercase )
if "pos_bias_u" in name:
UpperCamelCase = None
elif "pos_bias_v" in name:
UpperCamelCase = None
elif "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = 'weight'
elif "running_mean" in name:
UpperCamelCase = 'running_mean'
elif "inv_freq" in name:
UpperCamelCase = 'inv_freq'
elif "running_var" in name:
UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase = 'num_batches_tracked'
else:
UpperCamelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int:
'''simple docstring'''
if config_path is not None:
UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' )
else:
UpperCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCamelCase = 'rotary'
if is_finetuned:
if dict_path:
UpperCamelCase = Dictionary.load(lowercase )
            # important: change the bos & pad token ids, since the CTC symbol is <pad>
            # and not <s> as in fairseq
UpperCamelCase = target_dict.pad_index
UpperCamelCase = target_dict.bos_index
UpperCamelCase = target_dict.eos_index
UpperCamelCase = len(target_dict.symbols )
UpperCamelCase = os.path.join(lowercase , 'vocab.json' )
if not os.path.isdir(lowercase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase = 0
UpperCamelCase = 1
with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowercase , lowercase )
UpperCamelCase = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , )
UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
UpperCamelCase = WavaVecaConformerForCTC(lowercase )
else:
UpperCamelCase = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase = fairseq.tasks.setup_task(lowercase )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
UpperCamelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase : Dict = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
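# Illustrative invocation (paths are hypothetical; flags as defined above):
#   python convert_script.py --checkpoint_path ckpt.pt \
#       --pytorch_dump_folder_path out/ --dict_path dict.ltr.txt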
| 3 | 0 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : List[Any] = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Dict = DebertaVaTokenizer
__lowercase : Tuple = DebertaVaTokenizerFast
__lowercase : str = True
__lowercase : Optional[int] = True
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase = DebertaVaTokenizer(A_ , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = 'this is a test'
UpperCamelCase = 'this is a test'
return input_text, output_text
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = '<pad>'
UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(A_ ) , 30_001 )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = ' \tHeLLo!how \n Are yoU? '
UpperCamelCase = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
UpperCamelCase = DebertaVaTokenizer(A_ , do_lower_case=A_ )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
UpperCamelCase = DebertaVaTokenizerFast(A_ , do_lower_case=A_ )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(A_ , split_by_punct=A_ )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
UpperCamelCase = DebertaVaTokenizerFast(A_ , split_by_punct=A_ )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
UpperCamelCase = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
UpperCamelCase = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
UpperCamelCase = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = ' \tHeLLo!how \n Are yoU? '
UpperCamelCase = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
UpperCamelCase = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
UpperCamelCase = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
UpperCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
UpperCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = tokenizer.encode(A_ )
UpperCamelCase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = 'This is a test'
UpperCamelCase = [13, 1, 4_398, 25, 21, 1_289]
UpperCamelCase = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
UpperCamelCase = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
UpperCamelCase = DebertaVaTokenizer(A_ , keep_accents=A_ )
UpperCamelCase = DebertaVaTokenizerFast(A_ , keep_accents=A_ )
UpperCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
# fmt: off
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
UpperCamelCase = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
UpperCamelCase = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
UpperCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = DebertaVaTokenizer(A_ )
UpperCamelCase = tokenizer.encode('sequence builders' )
UpperCamelCase = tokenizer.encode('multi-sequence build' )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , A_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , A_ , )
@slow
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = {'input_ids': [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
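# Note on the fixtures above: the '▁' (U+2581) prefix is SentencePiece's
# word-boundary marker, and '<unk>' appears wherever a piece (such as 'é') is
# missing from the test vocabulary.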
| 714 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def A ( lowercase , lowercase ) -> List[str]:
'''simple docstring'''
return float((preds == labels).mean() )
def A ( lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = simple_accuracy(lowercase , lowercase )
UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] )
UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
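# Worked example for the accuracy helper above, assuming numpy arrays: with
# preds = np.array([0, 1, 1]) and labels = np.array([0, 1, 0]),
# float((preds == labels).mean()) evaluates to 2/3.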
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def __UpperCamelCase ( self , A_ , A_ ) -> Any:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(A_ , A_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(A_ , A_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(A_ , A_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(A_ , A_ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : Dict = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
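# Note: `_LazyModule` defers the heavy torch-backed imports declared in
# `_import_structure` until an attribute (e.g. `AltCLIPModel`) is first
# accessed, so importing the package itself stays cheap.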
| 715 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_UpperCAmelCase : str = "scheduler_config.json"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Tuple = 1
__lowercase : int = 2
__lowercase : List[Any] = 3
__lowercase : str = 4
__lowercase : Optional[Any] = 5
@dataclass
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : jnp.ndarray
class lowercase :
__lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME
__lowercase : Dict = ["dtype"]
__lowercase : List[Any] = []
__lowercase : Dict = True
@classmethod
def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , )
UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ):
UpperCamelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str:
"""simple docstring"""
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def __UpperCamelCase ( cls ) -> int:
"""simple docstring"""
UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase = importlib.import_module(__name__.split('.' )[0] )
UpperCamelCase = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
def A ( lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
assert len(lowercase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase ) - x.ndim) ) , lowercase )
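# Shape example for the helper above: an `x` of shape (B,) broadcast against a
# target shape (B, C, H, W) is first reshaped to (B, 1, 1, 1) and then expanded
# by `jnp.broadcast_to` to the full (B, C, H, W).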
def A ( lowercase , lowercase=0.9_9_9 , lowercase=jnp.floataa ) -> jnp.ndarray:
'''simple docstring'''
def alpha_bar(lowercase ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
UpperCamelCase = []
for i in range(lowercase ):
UpperCamelCase = i / num_diffusion_timesteps
UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowercase ) / alpha_bar(lowercase ) , lowercase ) )
return jnp.array(lowercase , dtype=lowercase )
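# Note: this is the "squaredcos_cap_v2" (Glide) cosine schedule, where each
# beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta) and
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2.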
@flax.struct.dataclass
class lowercase :
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
@classmethod
def __UpperCamelCase ( cls , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = scheduler.config
if config.trained_betas is not None:
UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
UpperCamelCase = 1.0 - betas
UpperCamelCase = jnp.cumprod(A_ , axis=0 )
return cls(
alphas=A_ , betas=A_ , alphas_cumprod=A_ , )
def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = state.alphas_cumprod
UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase = sqrt_alpha_prod.flatten()
UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape )
UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
UpperCamelCase = broadcast_to_shape_from_left(lowercase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def A ( lowercase , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase )
UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def A ( lowercase , lowercase , lowercase , lowercase ) -> int:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(lowercase , lowercase , lowercase , lowercase )
UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
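# Note: the quantity returned above follows the v-prediction parameterization,
# v = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * sample.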
| 3 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowercase :
__lowercase : Union[str, Any] = XGLMConfig
__lowercase : Dict = {}
__lowercase : str = "gelu"
def __init__( self , A_ , A_=14 , A_=7 , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=0.02 , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = d_model
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = ffn_dim
UpperCamelCase = activation_function
UpperCamelCase = activation_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = max_position_embeddings
UpperCamelCase = initializer_range
UpperCamelCase = None
UpperCamelCase = 0
UpperCamelCase = 2
UpperCamelCase = 1
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = self.get_config()
UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=A_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=A_ , )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__lowercase : Optional[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__lowercase : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__lowercase : str = False
__lowercase : Optional[Any] = False
__lowercase : Dict = False
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TFXGLMModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , n_embd=37 )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class lowercase ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self , A_=True ) -> List[str]:
"""simple docstring"""
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
UpperCamelCase = model.generate(A_ , do_sample=A_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , A_ )
@slow
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
UpperCamelCase = tokenizer('Today is a nice day and' , return_tensors='tf' )
UpperCamelCase = tokenized.input_ids
# force generation onto the CPU to avoid GPU-related quirks (and to ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
UpperCamelCase = model.generate(A_ , do_sample=A_ , seed=[7, 0] )
UpperCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=A_ )
UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_ , A_ )
@slow
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = 'left'
# use different length sentences to test batching
UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
UpperCamelCase = tokenizer(A_ , return_tensors='tf' , padding=A_ )
UpperCamelCase = inputs['input_ids']
UpperCamelCase = model.generate(input_ids=A_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
UpperCamelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
UpperCamelCase = model.generate(input_ids=A_ , max_new_tokens=12 )
UpperCamelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
UpperCamelCase = model.generate(input_ids=A_ , max_new_tokens=12 )
UpperCamelCase = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
UpperCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ )
UpperCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=A_ )
UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] )
| 716 |
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self ) -> Optional[Any]:
"""simple docstring"""
# sanity-check on construction that the subclass defines a consistent constraint
self.test()
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = False
while not completed:
if counter == 1:
self.reset()
UpperCamelCase = self.advance()
if not self.does_advance(A_ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Any:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
UpperCamelCase = token_ids
UpperCamelCase = len(self.token_ids )
UpperCamelCase = -1 # the index of the currently fulfilled step
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.fulfilled_idx += 1
UpperCamelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCamelCase = True
UpperCamelCase = completed
else:
# failed to make progress.
UpperCamelCase = True
self.reset()
return stepped, completed, reset
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = 0
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = PhrasalConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.fulfilled_idx
UpperCamelCase = self.completed
return new_constraint
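# --- illustrative sketch ---
# The class above mirrors transformers' PhrasalConstraint: it is fulfilled only by an
# exact token sequence, fed one id at a time through update(), and any wrong id resets
# the progress. A hedged walk-through, assuming the upstream import still works:
from transformers import PhrasalConstraint
demo = PhrasalConstraint([5, 7, 9])
assert demo.advance() == 5                      # next id that would make progress
assert demo.update(5) == (True, False, False)   # stepped, not complete, no reset
assert demo.update(7) == (True, False, False)
assert demo.remaining() == 1                    # one token left in the phrase
assert demo.update(9) == (True, True, False)    # phrase fulfilled
assert demo.completed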
class lowercase :
def __init__( self , A_ , A_=True ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = max([len(A_ ) for one in nested_token_ids] )
UpperCamelCase = {}
for token_ids in nested_token_ids:
UpperCamelCase = root
for tidx, token_id in enumerate(A_ ):
if token_id not in level:
UpperCamelCase = {}
UpperCamelCase = level[token_id]
if no_subsets and self.has_subsets(A_ , A_ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
UpperCamelCase = root
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.trie
for current_token in current_seq:
UpperCamelCase = start[current_token]
UpperCamelCase = list(start.keys() )
return next_tokens
def __UpperCamelCase ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.next_tokens(A_ )
return len(A_ ) == 0
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = list(root.values() )
if len(A_ ) == 0:
return 1
else:
return sum([self.count_leaves(A_ ) for nn in next_nodes] )
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.count_leaves(A_ )
return len(A_ ) != leaf_count
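# --- illustrative sketch ---
# The trie above stores several candidate token sequences and answers "which ids can
# legally come next after this prefix?". A hedged example, assuming the class keeps
# its upstream name and module path in recent transformers releases:
from transformers.generation.beam_constraints import DisjunctiveTrie
demo_trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4], [1, 5]])
assert demo_trie.next_tokens([1]) == [2, 5]     # both branches are still open
assert demo_trie.next_tokens([1, 2]) == [3, 4]
assert demo_trie.reached_leaf([1, 5])           # a complete candidate sequence
assert not demo_trie.reached_leaf([1, 2])       # still mid-branch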
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
UpperCamelCase = DisjunctiveTrie(A_ )
UpperCamelCase = nested_token_ids
UpperCamelCase = self.trie.max_height
UpperCamelCase = []
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.trie.next_tokens(self.current_seq )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.current_seq.append(A_ )
UpperCamelCase = True
else:
UpperCamelCase = True
self.reset()
UpperCamelCase = self.trie.reached_leaf(self.current_seq )
UpperCamelCase = completed
return stepped, completed, reset
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = []
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
UpperCamelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.current_seq
UpperCamelCase = self.completed
return new_constraint
class lowercase :
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = constraints
# max # of steps required to fulfill a given constraint
UpperCamelCase = max([c.seqlen for c in constraints] )
UpperCamelCase = len(A_ )
UpperCamelCase = False
self.init_state()
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = None
UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints]
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCamelCase = constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
else:
UpperCamelCase = self.inprogress_constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Any:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCamelCase , UpperCamelCase = self.add(A_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
UpperCamelCase , UpperCamelCase = False, False
if self.completed:
UpperCamelCase = True
UpperCamelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) )
UpperCamelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCamelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCamelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A_ ):
UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(A_ )
UpperCamelCase = None
if not complete and stepped:
UpperCamelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCamelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCamelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __UpperCamelCase ( self , A_=True ) -> Tuple:
"""simple docstring"""
UpperCamelCase = ConstraintListState(self.constraints ) # we never actually touch the self.constraints objects
# throughout this process, so the copy starts at its initialization state.
if stateful:
UpperCamelCase = [
constraint.copy(stateful=A_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ )
UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
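# --- illustrative sketch ---
# The state machine above (ConstraintListState upstream) drives constrained beam
# search: at most one constraint is "in progress", advance() proposes ids that make
# progress, and add() routes each generated id to the right constraint. A hedged
# example, assuming the upstream names and module path:
from transformers.generation.beam_constraints import ConstraintListState, PhrasalConstraint
demo_state = ConstraintListState([PhrasalConstraint([5, 7]), PhrasalConstraint([9])])
assert demo_state.advance() == [5, 9]   # first token of every pending constraint
demo_state.add(5)                       # [5, 7] becomes the in-progress constraint
assert demo_state.advance() == [7]      # only the in-progress constraint proposes now
demo_state.add(7)
demo_state.add(9)
assert demo_state.completed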
| 3 | 0 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , *A_ , **A_ ) -> Tuple:
"""simple docstring"""
super().__init__(*A_ , **A_ )
requires_backends(self , 'vision' )
self.check_model_type(A_ )
def __call__( self , A_ , **A_ ) -> Dict:
"""simple docstring"""
return super().__call__(A_ , **A_ )
def __UpperCamelCase ( self , **A_ ) -> Dict:
"""simple docstring"""
return {}, {}, {}
def __UpperCamelCase ( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = load_image(A_ )
UpperCamelCase = image.size
UpperCamelCase = self.image_processor(images=A_ , return_tensors=self.framework )
return model_inputs
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = self.model(**A_ )
return model_outputs
def __UpperCamelCase ( self , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = model_outputs.predicted_depth
UpperCamelCase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=A_ )
UpperCamelCase = prediction.squeeze().cpu().numpy()
UpperCamelCase = (output * 255 / np.max(A_ )).astype('uint8' )
UpperCamelCase = Image.fromarray(A_ )
UpperCamelCase = {}
UpperCamelCase = predicted_depth
UpperCamelCase = depth
return output_dict
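# --- illustrative usage sketch ---
# The pipeline above interpolates the model's predicted depth back to the input image
# size and returns both the raw tensor and an 8-bit PIL rendering. Hedged example;
# the checkpoint name is one public possibility, not the only one:
from transformers import pipeline
depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(result["predicted_depth"].shape)   # raw model output, before interpolation
result["depth"].save("depth.png")        # PIL image with depth scaled to 0-255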
| 717 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self , A_ , A_ = None , A_ = None ) -> Any:
"""simple docstring"""
super().__init__()
UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase = torch.zeros(A_ , A_ )
else:
UpperCamelCase = None
UpperCamelCase = torch.nn.Parameter(A_ )
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : VQModel
__lowercase : CLIPTextModel
__lowercase : CLIPTokenizer
__lowercase : TransformeraDModel
__lowercase : LearnedClassifierFreeSamplingEmbeddings
__lowercase : VQDiffusionScheduler
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied them
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
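# --- illustrative usage sketch ---
# End-to-end use of the pipeline above: CLIP encodes the prompt, the transformer
# denoises discrete latent indices under the VQDiffusionScheduler, and the VQ-VAE
# decodes them to pixels. Hedged example, assuming the upstream checkpoint name:
import torch
from diffusers import VQDiffusionPipeline
pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
image.save("teddy_bear.png")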
| 3 | 0 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_UpperCAmelCase : int = logging.get_logger(__name__)
def A ( lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
try:
with open(lowercase , 'rb' ) as flax_state_f:
UpperCamelCase = from_bytes(lowercase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(lowercase ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f'''Unable to convert {model_file} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(lowercase , lowercase )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
UpperCamelCase = flatten_dict(jax.tree_util.tree_map(lambda lowercase : x.dtype == jnp.bfloataa , lowercase ) ).values()
if any(lowercase ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
UpperCamelCase = jax.tree_util.tree_map(
lambda lowercase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowercase )
UpperCamelCase = ''
UpperCamelCase = flatten_dict(lowercase , sep='.' )
UpperCamelCase = pt_model.state_dict()
# keep track of unexpected & missing keys
UpperCamelCase = []
UpperCamelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
UpperCamelCase = flax_key_tuple_array[:-1] + ['weight']
UpperCamelCase = jnp.transpose(lowercase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
UpperCamelCase = flax_key_tuple_array[:-1] + ['weight']
UpperCamelCase = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
UpperCamelCase = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(lowercase ):
UpperCamelCase = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
UpperCamelCase = '.'.join(lowercase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
UpperCamelCase = np.asarray(lowercase ) if not isinstance(lowercase , np.ndarray ) else flax_tensor
UpperCamelCase = torch.from_numpy(lowercase )
# remove from missing keys
missing_keys.remove(lowercase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowercase )
pt_model.load_state_dict(lowercase )
# re-transform missing_keys to list
UpperCamelCase = list(lowercase )
if len(lowercase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(lowercase ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
return pt_model
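# --- illustrative sketch ---
# The heart of the conversion above is a layout translation: Flax "kernel" parameters
# become PyTorch "weight" tensors, with dense kernels transposed and 4D conv kernels
# permuted from HWIO to OIHW. The two rules, demonstrated standalone in NumPy:
import numpy as np
dense_kernel = np.zeros((768, 3_072))        # Flax dense kernels are (in_features, out_features)
assert dense_kernel.T.shape == (3_072, 768)  # torch.nn.Linear stores (out_features, in_features)
conv_kernel = np.zeros((3, 3, 16, 32))       # Flax conv kernels are (H, W, in_ch, out_ch)
assert np.transpose(conv_kernel, (3, 2, 0, 1)).shape == (32, 16, 3, 3)  # torch wants (out, in, H, W)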
| 718 |
from string import ascii_uppercase
_UpperCAmelCase : Dict = {char: i for i, char in enumerate(ascii_uppercase)}
_UpperCAmelCase : Tuple = dict(enumerate(ascii_uppercase))
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = len(lowercase )
UpperCamelCase = 0
while True:
if x == i:
UpperCamelCase = 0
if len(lowercase ) == len(lowercase ):
break
key += key[i]
i += 1
return key
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
UpperCamelCase = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
UpperCamelCase = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def A ( ) -> None:
'''simple docstring'''
UpperCamelCase = 'THE GERMAN ATTACK'
UpperCamelCase = 'SECRET'
UpperCamelCase = generate_key(lowercase , lowercase )
UpperCamelCase = cipher_text(lowercase , lowercase )
print(f'''Encrypted Text = {s}''' )
print(f'''Original Text = {original_text(lowercase , lowercase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
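# --- illustrative sketch ---
# A worked example of the three functions above, assuming their upstream names and
# signatures: generate_key(message, key), cipher_text(message, key_new) and
# original_text(cipher_text, key_new). The key is repeated until it matches the
# message length; encryption subtracts the key letter mod 26, decryption adds it
# back, and spaces pass through without consuming a key letter.
assert generate_key("HELLO", "AB") == "ABABA"
# H(7)-A(0)=7 -> H, E(4)-B(1)=3 -> D, L(11)-A(0)=11 -> L, L(11)-B(1)=10 -> K, O(14)-A(0)=14 -> O
assert cipher_text("HELLO", "ABABA") == "HDLKO"
assert original_text("HDLKO", "ABABA") == "HELLO"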
| 3 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 719 |
from collections.abc import Callable
def A ( lowercase , lowercase , lowercase ) -> float:
'''simple docstring'''
UpperCamelCase = a
UpperCamelCase = b
if function(lowercase ) == 0: # one of the a or b is a root for the function
return a
elif function(lowercase ) == 0:
return b
elif (
function(lowercase ) * function(lowercase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
UpperCamelCase = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # iterate until the bracket is narrower than 10^-7
if function(lowercase ) == 0:
return mid
elif function(lowercase ) * function(lowercase ) < 0:
UpperCamelCase = mid
else:
UpperCamelCase = mid
UpperCamelCase = start + (end - start) / 2.0
return mid
def A ( lowercase ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
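# --- illustrative sketch ---
# Bisection needs a sign change: f(a) and f(b) must straddle zero, and each iteration
# halves the bracket until |start - mid| <= 1e-7. A quick check against a known root,
# reusing the name `bisection` that the __main__ block above expects:
demo_root = bisection(lambda x: x**2 - 4, 1.0, 3.0)   # x^2 - 4 changes sign on [1, 3]
assert abs(demo_root - 2.0) < 1e-6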
| 3 | 0 |
def A ( lowercase ) -> list:
'''simple docstring'''
UpperCamelCase = [0] * len(lowercase )
for i in range(1 , len(lowercase ) ):
# use last results for better performance - dynamic programming
UpperCamelCase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
UpperCamelCase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
UpperCamelCase = j
return prefix_result
def A ( lowercase ) -> int:
'''simple docstring'''
return max(prefix_function(lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
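# --- illustrative sketch ---
# prefix_function computes, for every prefix of the string, the length of its longest
# proper prefix that is also a suffix -- the failure table used by Knuth-Morris-Pratt.
# Worked example, reusing the in-file name:
assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
# e.g. the final 3: "aab" is both a proper prefix and a suffix of "aabaaab"
assert max(prefix_function("aabaaab")) == 3   # what the second helper above returns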
| 720 |
import os
_UpperCAmelCase : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def A ( lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = 0
UpperCamelCase = 0
while index < len(lowercase ) - 1:
UpperCamelCase = SYMBOLS[numerals[index]]
UpperCamelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = num // 1_000
numerals += m_count * "M"
num %= 1_000
UpperCamelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
UpperCamelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def A ( lowercase = "/p089_roman.txt" ) -> int:
'''simple docstring'''
UpperCamelCase = 0
with open(os.path.dirname(lowercase ) + roman_numerals_filename ) as filea:
UpperCamelCase = filea.readlines()
for line in lines:
UpperCamelCase = line.strip()
UpperCamelCase = parse_roman_numerals(lowercase )
UpperCamelCase = generate_roman_numerals(lowercase )
savings += len(lowercase ) - len(lowercase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
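# --- illustrative sketch ---
# Project Euler 89 measures how many characters minimal Roman numerals save over the
# additive forms in the input file. One round trip, reusing the in-file names:
assert parse_roman_numerals("XXXXVIIII") == 49   # additive form, 9 characters
assert generate_roman_numerals(49) == "XLIX"     # minimal form, 4 characters: saves 5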
| 3 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase )
UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCamelCase = dataset_size < in_memory_max_size
else:
UpperCamelCase = False
UpperCamelCase = is_small_dataset(lowercase )
assert result == expected
| 721 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase )
UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCamelCase = dataset_size < in_memory_max_size
else:
UpperCamelCase = False
UpperCamelCase = is_small_dataset(lowercase )
assert result == expected
| 3 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Any = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : List[str] = "time_series_transformer"
__lowercase : int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , A_ = None , A_ = None , A_ = "student_t" , A_ = "nll" , A_ = 1 , A_ = [1, 2, 3, 4, 5, 6, 7] , A_ = "mean" , A_ = 0 , A_ = 0 , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 32 , A_ = 32 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = True , A_ = "gelu" , A_ = 64 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 100 , A_ = 0.02 , A_=True , **A_ , ) -> List[str]:
"""simple docstring"""
UpperCamelCase = prediction_length
UpperCamelCase = context_length or prediction_length
UpperCamelCase = distribution_output
UpperCamelCase = loss
UpperCamelCase = input_size
UpperCamelCase = num_time_features
UpperCamelCase = lags_sequence
UpperCamelCase = scaling
UpperCamelCase = num_dynamic_real_features
UpperCamelCase = num_static_real_features
UpperCamelCase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCamelCase = cardinality
else:
UpperCamelCase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCamelCase = embedding_dimension
else:
UpperCamelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCamelCase = num_parallel_samples
# Transformer architecture configuration
UpperCamelCase = input_size * len(A_ ) + self._number_of_features
UpperCamelCase = d_model
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_attention_heads
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = decoder_layers
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = use_cache
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
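# --- illustrative sketch ---
# The width of the transformer input is derived, not configured directly:
#   feature_size = input_size * len(lags_sequence) + _number_of_features
# where _number_of_features is the property defined above. A hedged check, assuming
# the derived attribute keeps its upstream name `feature_size`:
from transformers import TimeSeriesTransformerConfig
demo_config = TimeSeriesTransformerConfig(prediction_length=24)
expected = demo_config.input_size * len(demo_config.lags_sequence) + demo_config._number_of_features
assert demo_config.feature_size == expected   # 1 * 7 lags + 2 scaling features = 9 by default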
| 700 |
def A ( lowercase , lowercase ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b"
UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b"
UpperCamelCase = max(len(lowercase ) , len(lowercase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
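# --- illustrative sketch ---
# The function above XORs two non-negative integers digit-by-digit over their binary
# strings, zero-padding the shorter one. A quick check, assuming the upstream name
# binary_xor; the result agrees with Python's native ^ operator:
assert binary_xor(25, 32) == "0b111001"
assert int(binary_xor(25, 32), 2) == 25 ^ 32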
| 3 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase : Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split('.' ):
UpperCamelCase = getattr(lowercase , lowercase )
if weight_type is not None:
UpperCamelCase = getattr(lowercase , lowercase ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(lowercase )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , lowercase )
if "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = 'weight'
else:
UpperCamelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
UpperCamelCase = UniSpeechSatConfig.from_pretrained(lowercase )
else:
UpperCamelCase = UniSpeechSatConfig()
UpperCamelCase = ''
if is_finetuned:
UpperCamelCase = UniSpeechSatForCTC(lowercase )
else:
UpperCamelCase = UniSpeechSatForPreTraining(lowercase )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
UpperCamelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase : Dict = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 701 |
import re
def A ( lowercase ) -> str:
'''simple docstring'''
if len(re.findall('[ATCG]' , lowercase ) ) != len(lowercase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
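# --- illustrative sketch ---
# The function above validates the strand with a regex, then complements it in one
# pass through a translation table. The table-based core, shown standalone:
demo_table = str.maketrans("ATCG", "TAGC")
assert "ATCG".translate(demo_table) == "TAGC"
assert "GTAAACGAC".translate(demo_table) == "CATTTGCTG"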
| 3 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCAmelCase : str = logging.get_logger(__name__)
class lowercase :
def __init__( self , A_ = None , A_ = None , A_=None , A_=None ) -> str:
"""simple docstring"""
if not conversation_id:
UpperCamelCase = uuid.uuida()
if past_user_inputs is None:
UpperCamelCase = []
if generated_responses is None:
UpperCamelCase = []
UpperCamelCase = conversation_id
UpperCamelCase = past_user_inputs
UpperCamelCase = generated_responses
UpperCamelCase = text
def __eq__( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCamelCase ( self , A_ , A_ = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
UpperCamelCase = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
UpperCamelCase = text
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
UpperCamelCase = None
def __UpperCamelCase ( self , A_ ) -> Any:
"""simple docstring"""
self.generated_responses.append(A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> str:
"""simple docstring"""
UpperCamelCase = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
UpperCamelCase = 'user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
_SCREAMING_SNAKE_CASE , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , *A_ , **A_ ) -> List[str]:
"""simple docstring"""
super().__init__(*A_ , **A_ )
if self.tokenizer.pad_token_id is None:
UpperCamelCase = self.tokenizer.eos_token
def __UpperCamelCase ( self , A_=None , A_=None , A_=None , **A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
if min_length_for_response is not None:
UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
UpperCamelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A_ , A_=0 , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = super().__call__(A_ , num_workers=A_ , **A_ )
if isinstance(A_ , A_ ) and len(A_ ) == 1:
return outputs[0]
return outputs
def __UpperCamelCase ( self , A_ , A_=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
F'''Conversation with UUID {conversation.uuid} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
UpperCamelCase = self.tokenizer._build_conversation_input_ids(A_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
UpperCamelCase = self._legacy_parse_and_tokenize(A_ )
if self.framework == "pt":
UpperCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
UpperCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __UpperCamelCase ( self , A_ , A_=10 , **A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
UpperCamelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
UpperCamelCase = max_length - minimum_tokens
UpperCamelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
UpperCamelCase = model_inputs['attention_mask'][:, -trim:]
UpperCamelCase = model_inputs.pop('conversation' )
UpperCamelCase = max_length
UpperCamelCase = self.model.generate(**A_ , **A_ )
if self.model.config.is_encoder_decoder:
UpperCamelCase = 1
else:
UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __UpperCamelCase ( self , A_ , A_=True ) -> List[str]:
"""simple docstring"""
UpperCamelCase = model_outputs['output_ids']
UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
UpperCamelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(A_ )
return conversation
def __UpperCamelCase ( self , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.tokenizer.eos_token_id
UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) )
if len(A_ ) > self.tokenizer.model_max_length:
UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids | 702 |
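# For reference, the legacy encoding above concatenates every turn of the
# conversation, each terminated by the tokenizer's EOS token, then truncates
# from the left to fit `model_max_length`. A minimal sketch with a
# DialoGPT-style tokenizer (the checkpoint name is illustrative, not part of
# this pipeline's API):
#
#     tok = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
#     ids = []
#     for text in ["Hi there!", "Hello, how can I help?"]:
#         ids.extend(tok.encode(text, add_special_tokens=False) + [tok.eos_token_id])
#     ids = ids[-tok.model_max_length:]  # keep only the most recent context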
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = (DDPMScheduler,)
def __UpperCamelCase ( self , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**A_ )
return config
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
UpperCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(A_ ) )
UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' )
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
UpperCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(A_ ) )
UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A_ )
UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(A_ ):
if i == len(A_ ) - 1:
UpperCamelCase = -1
else:
UpperCamelCase = timesteps[i + 1]
UpperCamelCase = scheduler.previous_timestep(A_ )
UpperCamelCase = prev_t.item()
self.assertEqual(A_ , A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(A_ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [100, 87, 50, 1, 0]
UpperCamelCase = len(A_ )
with self.assertRaises(A_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**A_ )
UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=A_ )
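# For reference, a minimal NumPy sketch of the update that `scheduler.step`
# performs inside the denoising loops above, assuming the "epsilon" prediction
# type and the linear beta schedule from `get_scheduler_config`. This is an
# illustration of the DDPM math, not the diffusers implementation; all names
# here are local to the sketch.
import numpy as np

_betas = np.linspace(0.0001, 0.02, 1_000)
_alphas = 1.0 - _betas
_alphas_bar = np.cumprod(_alphas)

def _ddpm_step(x_t, eps, t, rng):
    # posterior mean: (x_t - beta_t / sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_t)
    mean = (x_t - _betas[t] / np.sqrt(1.0 - _alphas_bar[t]) * eps) / np.sqrt(_alphas[t])
    if t == 0:
        return mean
    # "fixed_small" variance: beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
    var = _betas[t] * (1.0 - _alphas_bar[t - 1]) / (1.0 - _alphas_bar[t])
    return mean + np.sqrt(var) * rng.standard_normal(x_t.shape)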
| 3 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
_UpperCAmelCase : Tuple = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def A ( lowercase , lowercase=None , lowercase=None , lowercase=None ) -> str:
'''simple docstring'''
UpperCamelCase = True
while ask_again:
UpperCamelCase = input(lowercase )
try:
if default is not None and len(lowercase ) == 0:
return default
return convert_value(lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowercase )
def A ( lowercase , lowercase=[] , lowercase=None , lowercase=0 ) -> Any:
'''simple docstring'''
UpperCamelCase = BulletMenu(lowercase , lowercase )
UpperCamelCase = menu.run(default_choice=lowercase )
return convert_value(lowercase ) if convert_value is not None else result
def A ( lowercase ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = int(lowercase )
return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def A ( lowercase ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = int(lowercase )
return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def A ( lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = int(lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( lowercase ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = int(lowercase )
return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = int(lowercase )
return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def A ( lowercase ) -> Optional[Any]:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class lowercase ( argparse.RawDescriptionHelpFormatter ):
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = super()._format_usage(A_ , A_ , A_ , A_ )
UpperCamelCase = usage.replace('<command> [<args>] ' , '' )
return usage
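# Illustrative behaviour of the converters above: each one maps the integer
# index a user picks in the bullet menu back to the matching enum value. The
# helper names below are the ones from the original accelerate source (every
# function in this listing is obfuscated to `A`):
#
#     _convert_compute_environment(0)   # -> ComputeEnvironment.LOCAL_MACHINE
#     _convert_dynamo_backend(2)        # -> DynamoBackend("INDUCTOR").value
#     _convert_yes_no_to_bool("Yes")    # -> True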
| 703 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : List[str] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase : Optional[int] = {
"camembert-base": 512,
}
_UpperCAmelCase : Union[str, Any] = "▁"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ["input_ids", "attention_mask"]
__lowercase : Tuple = CamembertTokenizer
def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
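# The special-token layout built by the two methods above, concretely: a
# single sequence becomes `<s> A </s>` and a pair becomes
# `<s> A </s></s> B </s>`, with token_type_ids all zeros in both cases
# (CamemBERT, like RoBERTa, does not use token types). Sketch with
# illustrative token ids:
#
#     tok.build_inputs_with_special_tokens([10, 11])        # [<s>, 10, 11, </s>]
#     tok.build_inputs_with_special_tokens([10, 11], [12])  # [<s>, 10, 11, </s>, </s>, 12, </s>]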
| 3 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A ( lowercase ) -> Dict: # picklable for multiprocessing
'''simple docstring'''
return x.sum()
def A ( lowercase ) -> Tuple: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@dataclass
class lowercase :
__lowercase : int
__lowercase : str
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 1
UpperCamelCase = [1, 2]
UpperCamelCase = {'a': 1, 'b': 2}
UpperCamelCase = {'a': [1, 2], 'b': [3, 4]}
UpperCamelCase = {'a': {'1': 1}, 'b': 2}
UpperCamelCase = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 2
UpperCamelCase = [2, 3]
UpperCamelCase = {'a': 2, 'b': 3}
UpperCamelCase = {'a': [2, 3], 'b': [4, 5]}
UpperCamelCase = {'a': {'1': 2}, 'b': 3}
UpperCamelCase = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
UpperCamelCase = 2
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
UpperCamelCase = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
UpperCamelCase = {'a': 2, 'b': 0, 'c': 2}
UpperCamelCase = {
'a': np.eye(2 ).astype(A_ ),
'b': np.zeros(3 ).astype(A_ ),
'c': np.ones(2 ).astype(A_ ),
}
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(A_ ): # can't pickle a local lambda
map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = {'a': 1, 'b': 2}
UpperCamelCase = {'a': 3, 'b': 4}
UpperCamelCase = {'a': 5, 'b': 6}
UpperCamelCase = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
class lowercase :
__lowercase : int = "bar"
UpperCamelCase = Foo()
self.assertEqual(foo.my_attr , 'bar' )
with temporary_assignment(A_ , 'my_attr' , 'BAR' ):
self.assertEqual(foo.my_attr , 'BAR' )
self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
UpperCamelCase = {f'''{i}''': i for i in range(lowercase )}
UpperCamelCase = map_nested(lambda lowercase : x + 10 , lowercase , num_proc=lowercase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class lowercase ( _SCREAMING_SNAKE_CASE ):
@require_tf
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
UpperCamelCase = layers.Dense(2 )
def gen_random_output():
UpperCamelCase = tf.random.uniform((1, 3) )
return model(A_ ).numpy()
with temp_seed(42 , set_tensorflow=A_ ):
UpperCamelCase = gen_random_output()
with temp_seed(42 , set_tensorflow=A_ ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
import torch
def gen_random_output():
UpperCamelCase = torch.nn.Linear(3 , 2 )
UpperCamelCase = torch.rand(1 , 3 )
return model(A_ ).detach().numpy()
with temp_seed(42 , set_pytorch=A_ ):
UpperCamelCase = gen_random_output()
with temp_seed(42 , set_pytorch=A_ ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
UpperCamelCase = gen_random_output()
with temp_seed(42 ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}] )
def A ( lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = NestedDataStructure(lowercase ).data
assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = NestedDataStructure(lowercase ).flatten()
assert output == expected_output
def A ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = A(x=1 , y='foobar' )
UpperCamelCase = {'x': 1, 'y': 'foobar'}
assert asdict(lowercase ) == expected_output
UpperCamelCase = {'a': {'b': A(x=10 , y='foo' )}, 'c': [A(x=20 , y='bar' )]}
UpperCamelCase = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
assert asdict(lowercase ) == expected_output
with pytest.raises(lowercase ):
asdict([1, A(x=10 , y='foo' )] )
def A ( lowercase ) -> str:
'''simple docstring'''
return text.split()
def A ( lowercase ) -> Optional[Any]:
'''simple docstring'''
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def A ( ) -> Optional[Any]:
'''simple docstring'''
with Pool(2 ) as pool:
UpperCamelCase = list(iflatmap_unordered(lowercase , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
assert out.count('hello' ) == 10
assert out.count('there' ) == 10
assert len(lowercase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
UpperCamelCase = list(iflatmap_unordered(lowercase , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
assert out.count('hello' ) == 10
assert out.count('there' ) == 10
assert len(lowercase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
UpperCamelCase = []
for yield_time, content in iflatmap_unordered(
lowercase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(lowercase )
assert out.count('a' ) == 2
assert out.count('b' ) == 2
assert len(lowercase ) == 4
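# For reference, `map_nested` (exercised above) applies a function to every
# leaf of an arbitrarily nested list/dict structure, optionally across
# processes once the input is long enough, e.g.:
#
#     map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}})
#     # -> {"a": [2, 3], "b": {"c": 4}}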
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Union[str, Any] = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
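# The `_LazyModule` instance above defers the torch-backed imports until an
# attribute is first accessed, so `import transformers.models.git` stays
# cheap even without torch installed. A rough sketch of the mechanism
# (simplified and assumed; the real class also resolves submodules and
# implements `__dir__`):
#
#     class LazyModule(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = self._class_to_module[name]  # e.g. "modeling_git"
#             module = importlib.import_module("." + submodule, self.__name__)
#             return getattr(module, name)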
| 3 | 0 |
_UpperCAmelCase : int = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = "data2vec-text"
def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class lowercase ( _SCREAMING_SNAKE_CASE ):
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
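# The mapping above tells the ONNX exporter which input axes stay symbolic:
# axis 0 is the batch and axis 1 the sequence length (with an extra choice
# axis for multiple-choice tasks), so the exported graph accepts variable
# batch sizes and sequence lengths. This is roughly what would be passed as
# `dynamic_axes={"input_ids": {0: "batch", 1: "sequence"}, ...}` to
# `torch.onnx.export` (a sketch of the idea, not the exact export call).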
| 3 | 0 |
from manim import *
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
UpperCamelCase = Text('CPU' , font_size=24 )
UpperCamelCase = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
UpperCamelCase = [mem.copy() for i in range(4 )]
UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase = Text('GPU' , font_size=24 )
UpperCamelCase = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase = Text('Model' , font_size=24 )
UpperCamelCase = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
UpperCamelCase = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCamelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=A_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=A_ , buff=0.0 )
self.add(A_ )
cpu_targs.append(A_ )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase = Text('Loaded Checkpoint' , font_size=24 )
UpperCamelCase = Group(A_ , A_ ).arrange(A_ , aligned_edge=A_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
UpperCamelCase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCamelCase = MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) , Write(A_ ) )
self.play(Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) )
UpperCamelCase = []
UpperCamelCase = []
for i, rect in enumerate(A_ ):
UpperCamelCase = fill.copy().set_fill(A_ , opacity=0.7 )
target.move_to(A_ )
first_animations.append(GrowFromCenter(A_ , run_time=1 ) )
UpperCamelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(A_ , run_time=1.5 ) )
self.play(*A_ )
self.play(*A_ )
self.wait()
| 706 |
from random import shuffle
import tensorflow as tf
from numpy import array
def A ( lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = int(lowercase )
assert noofclusters < len(lowercase )
# Find out the dimensionality
UpperCamelCase = len(vectors[0] )
# Will help select random centroids from among the available vectors
UpperCamelCase = list(range(len(lowercase ) ) )
shuffle(lowercase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
UpperCamelCase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
UpperCamelCase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
UpperCamelCase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase )
]
##These nodes will assign the centroid Variables the appropriate
##values
UpperCamelCase = tf.placeholder('float64' , [dim] )
UpperCamelCase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase , lowercase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
UpperCamelCase = [tf.Variable(0 ) for i in range(len(lowercase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
UpperCamelCase = tf.placeholder('int32' )
UpperCamelCase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase , lowercase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
UpperCamelCase = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
UpperCamelCase = tf.reduce_mean(lowercase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
UpperCamelCase = tf.placeholder('float' , [dim] )
UpperCamelCase = tf.placeholder('float' , [dim] )
UpperCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase , lowercase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
UpperCamelCase = tf.placeholder('float' , [noofclusters] )
UpperCamelCase = tf.argmin(lowercase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
UpperCamelCase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
UpperCamelCase = 100
for _ in range(lowercase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase ) ):
UpperCamelCase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
UpperCamelCase = [
sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
UpperCamelCase = sess.run(
lowercase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase ):
# Collect all the vectors assigned to this cluster
UpperCamelCase = [
vectors[i]
for i in range(len(lowercase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
UpperCamelCase = sess.run(
lowercase , feed_dict={mean_input: array(lowercase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
UpperCamelCase = sess.run(lowercase )
UpperCamelCase = sess.run(lowercase )
return centroids, assignments
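# A minimal usage sketch for the clustering routine above (it requires a
# TensorFlow 1.x runtime, since it builds `tf.Session` / `tf.placeholder`
# graphs). `TFKMeansCluster` is the name from the original source; in the
# obfuscated listing above the function is called `A`:
#
#     vectors = [array([0.0, 0.0]), array([0.1, 0.0]), array([5.0, 5.0]), array([5.1, 5.0])]
#     centroids, assignments = TFKMeansCluster(vectors, 2)
#     # `centroids` holds the two learned centers and `assignments` the
#     # cluster index chosen for each input vector.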
| 3 | 0 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def A ( lowercase , lowercase ) -> float:
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowercase , lowercase ) ) )
def A ( lowercase , lowercase ) -> list[list[list[float] | float]]:
'''simple docstring'''
if dataset.ndim != value_array.ndim:
UpperCamelCase = (
'Wrong input data\'s dimensions... '
f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(lowercase )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCamelCase = (
'Wrong input data\'s shape... '
f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(lowercase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCamelCase = (
'Input data have different datatype... '
f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(lowercase )
UpperCamelCase = []
for value in value_array:
UpperCamelCase = euclidean(lowercase , dataset[0] )
UpperCamelCase = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCamelCase = euclidean(lowercase , lowercase )
if dist > temp_dist:
UpperCamelCase = temp_dist
UpperCamelCase = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def A ( lowercase , lowercase ) -> float:
'''simple docstring'''
return np.dot(lowercase , lowercase ) / (norm(lowercase ) * norm(lowercase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
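# A small usage sketch for the helpers above (`similarity_search` and
# `cosine_similarity` in the original source; both appear as `A` in this
# obfuscated listing):
#
#     dataset = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
#     value_array = np.array([[0.0, 0.0, 0.1]])
#     similarity_search(dataset, value_array)
#     # -> [[[0.0, 0.0, 0.0], 0.1]]  (nearest vector and its Euclidean distance)
#     cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 0.0]))  # -> 1.0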
| 707 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_UpperCAmelCase : Tuple = _symbol_database.Default()
_UpperCAmelCase : List[Any] = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. "
b"\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_UpperCAmelCase : int = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_UpperCAmelCase : int = None
_UpperCAmelCase : List[str] = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_UpperCAmelCase : Optional[Any] = 45
_UpperCAmelCase : Any = 1_581
_UpperCAmelCase : Tuple = 1_517
_UpperCAmelCase : List[str] = 1_570
_UpperCAmelCase : int = 1_584
_UpperCAmelCase : List[Any] = 1_793
_UpperCAmelCase : Optional[int] = 1_795
_UpperCAmelCase : Any = 1_916
_UpperCAmelCase : Tuple = 1_864
_UpperCAmelCase : List[Any] = 1_905
_UpperCAmelCase : Union[str, Any] = 1_919
_UpperCAmelCase : str = 2_429
_UpperCAmelCase : Any = 2_208
_UpperCAmelCase : Dict = 2_418
_UpperCAmelCase : Optional[Any] = 2_323
_UpperCAmelCase : Tuple = 2_407
# @@protoc_insertion_point(module_scope)
| 3 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : int = "cvt"
def __init__( self , A_=3 , A_=[7, 3, 3] , A_=[4, 2, 2] , A_=[2, 1, 1] , A_=[64, 192, 384] , A_=[1, 3, 6] , A_=[1, 2, 10] , A_=[4.0, 4.0, 4.0] , A_=[0.0, 0.0, 0.0] , A_=[0.0, 0.0, 0.0] , A_=[0.0, 0.0, 0.1] , A_=[True, True, True] , A_=[False, False, True] , A_=["dw_bn", "dw_bn", "dw_bn"] , A_=[3, 3, 3] , A_=[1, 1, 1] , A_=[2, 2, 2] , A_=[1, 1, 1] , A_=[1, 1, 1] , A_=0.02 , A_=1e-12 , **A_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(**A_ )
UpperCamelCase = num_channels
UpperCamelCase = patch_sizes
UpperCamelCase = patch_stride
UpperCamelCase = patch_padding
UpperCamelCase = embed_dim
UpperCamelCase = num_heads
UpperCamelCase = depth
UpperCamelCase = mlp_ratio
UpperCamelCase = attention_drop_rate
UpperCamelCase = drop_rate
UpperCamelCase = drop_path_rate
UpperCamelCase = qkv_bias
UpperCamelCase = cls_token
UpperCamelCase = qkv_projection_method
UpperCamelCase = kernel_qkv
UpperCamelCase = padding_kv
UpperCamelCase = stride_kv
UpperCamelCase = padding_q
UpperCamelCase = stride_q
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
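# Instantiating the config above with its defaults reproduces the CvT-13
# layout: three stages with embedding dims (64, 192, 384), (1, 3, 6) attention
# heads and (1, 2, 10) transformer blocks per stage, e.g.:
#
#     config = CvtConfig()
#     config.embed_dim   # [64, 192, 384]
#     config.depth       # [1, 2, 10]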
| 708 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase = tempfile.mktemp()
with open(A_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ )
UpperCamelCase = AlbertTokenizer.from_pretrained(A_ )
finally:
os.remove(A_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ )
UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase ( unittest.TestCase ):
__lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __UpperCamelCase ( cls ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def __UpperCamelCase ( cls ) -> Optional[int]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = CustomTokenizer(A_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizerFast.from_pretrained(A_ )
bert_tokenizer.save_pretrained(A_ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase = Trie()
UpperCamelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A_ , ['AB', 'C'] )
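# How the Trie above is used by the tokenizer: added tokens are stored as
# nested character dicts (with the empty key "" marking a terminal), and
# `split` scans the text left to right, cutting on the longest token that
# completes. A short illustration:
#
#     trie = Trie()
#     trie.add("[CLS]")
#     trie.split("[CLS] hello")   # -> ["[CLS]", " hello"]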
| 3 | 0 |
from numpy import exp, pi, sqrt
def A ( lowercase , lowercase = 0.0 , lowercase = 1.0 ) -> int:
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
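# Quick sanity checks for the density above (`gaussian` in the original
# source; obfuscated to `A` here): the standard normal peaks at
# 1 / sqrt(2*pi) ≈ 0.3989 at x = mu, and widening sigma lowers the peak.
#
#     gaussian(0)                  # -> 0.3989422804014327
#     gaussian(2, mu=2, sigma=3)   # -> 0.1329807601338109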
| 709 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(lowercase , lowercase ):
UpperCamelCase = parquet_path
elif issubclass(lowercase , lowercase ):
UpperCamelCase = [parquet_path]
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
for split in splits:
UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if split:
UpperCamelCase = {split: parquet_path}
else:
UpperCamelCase = 'train'
UpperCamelCase = {'train': parquet_path, 'test': parquet_path}
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' )
UpperCamelCase = pf.read()
assert dataset.data.table == output_table
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' )
UpperCamelCase = {'image': [image_path]}
UpperCamelCase = Features({'image': Image()} )
UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase )
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
assert get_writer_batch_size(lowercase ) == expected
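# Added round-trip sketch (illustrative, hub-free): write a tiny Dataset to
# Parquet and read it back with the same I/O classes the tests above exercise.
def _parquet_roundtrip_example(tmp_dir):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [0, 1]})
    path = f"{tmp_dir}/example.parquet"
    ParquetDatasetWriter(ds, path).write()  # returns the number of bytes written
    reloaded = ParquetDatasetReader(path, cache_dir=tmp_dir).read()
    assert reloaded.column_names == ["col_1", "col_2"]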
| 3 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( lowercase , lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = args.log_outputs
UpperCamelCase = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
UpperCamelCase = load_metric('wer' )
UpperCamelCase = load_metric('cer' )
# compute metrics
UpperCamelCase = wer.compute(references=result['target'] , predictions=result['prediction'] )
UpperCamelCase = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
UpperCamelCase = f'''WER: {wer_result}\nCER: {cer_result}'''
print(lowercase )
with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(lowercase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCamelCase = f'''log_{dataset_id}_predictions.txt'''
UpperCamelCase = f'''log_{dataset_id}_targets.txt'''
with open(lowercase , 'w' ) as p, open(lowercase , 'w' ) as t:
# mapping function to write output
def write_to_file(lowercase , lowercase ):
p.write(f'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(lowercase , with_indices=lowercase )
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCamelCase = re.sub(lowercase , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCamelCase = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
UpperCamelCase = ' '.join(text.split(lowercase ) )
return text
def A ( lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCamelCase = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCamelCase = feature_extractor.sampling_rate
# resample audio
UpperCamelCase = dataset.cast_column('audio' , Audio(sampling_rate=lowercase ) )
# load eval pipeline
if args.device is None:
UpperCamelCase = 0 if torch.cuda.is_available() else -1
UpperCamelCase = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowercase ):
UpperCamelCase = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCamelCase = prediction['text']
UpperCamelCase = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
UpperCamelCase = dataset.map(lowercase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowercase , lowercase )
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
_UpperCAmelCase : Any = parser.parse_args()
main(args)
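# Example invocation (illustrative; the script name and the model/dataset
# identifiers below are placeholders, not taken from the file itself):
# python eval.py --model_id facebook/wav2vec2-base-960h \
#     --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs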
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Tuple:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size if size is not None else {'height': 18, 'width': 20}
UpperCamelCase = do_thumbnail
UpperCamelCase = do_align_axis
UpperCamelCase = do_pad
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Optional[int] = DonutImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = DonutImageProcessingTester(self )
@property
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_thumbnail' ) )
self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
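# Added usage sketch (illustrative): run a Donut processor configured like the
# tester above on a synthetic RGB image; the final padded shape assumes the
# default do_pad behaviour, matching the shape assertions in the tests.
def _donut_processor_example():
    import numpy as np
    from transformers import DonutImageProcessor
    processor = DonutImageProcessor(size={"height": 18, "width": 20})
    dummy = np.zeros((30, 40, 3), dtype=np.uint8)  # H x W x C uint8 image
    pixel_values = processor(dummy, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 3, 18, 20)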
| 3 | 0 |
from __future__ import annotations
import numpy as np
def A ( lowercase ) -> np.ndarray:
'''simple docstring'''
return np.maximum(0 , lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 711 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_UpperCAmelCase : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_UpperCAmelCase : List[str] = {"facebook/blenderbot_small-90M": 512}
def A ( lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
UpperCamelCase = set(lowercase )
return pairs
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ )
with open(A_ , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(A_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
UpperCamelCase = merges_handle.read().split('\n' )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in merges]
UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase = {}
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = re.sub('([.,!?()])' , r' \1' , A_ )
UpperCamelCase = re.sub('(\')' , r' \1 ' , A_ )
UpperCamelCase = re.sub(r'\s{2,}' , ' ' , A_ )
if "\n" in token:
UpperCamelCase = token.replace('\n' , ' __newln__' )
UpperCamelCase = token.split(' ' )
UpperCamelCase = []
for token in tokens:
if not len(A_ ):
continue
UpperCamelCase = token.lower()
UpperCamelCase = tuple(A_ )
UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
UpperCamelCase = get_pairs(A_ )
if not pairs:
words.append(A_ )
continue
while True:
UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(A_ ):
try:
UpperCamelCase = word.index(A_ , A_ )
new_word.extend(word[i:j] )
UpperCamelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(A_ )
UpperCamelCase = new_word
if len(A_ ) == 1:
break
else:
UpperCamelCase = get_pairs(A_ )
UpperCamelCase = '@@ '.join(A_ )
UpperCamelCase = word[:-4]
UpperCamelCase = word
words.append(A_ )
return " ".join(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = re.findall(r'\S+\n?' , A_ )
for token in words:
split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) )
return split_tokens
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
UpperCamelCase = token.lower()
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = ' '.join(A_ ).replace('@@ ' , '' ).strip()
return out_string
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
UpperCamelCase = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
UpperCamelCase = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
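# Added illustration (self-contained) of the adjacent-pair extraction that
# drives the BPE merge loop above: each iteration picks the lowest-ranked
# bigram among these pairs and merges it.
def _adjacent_pairs_example():
    word = ("l", "o", "w", "e", "r</w>")
    pairs = {(a, b) for a, b in zip(word, word[1:])}
    assert pairs == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}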
| 3 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def A ( lowercase , lowercase=None ) -> Tuple:
'''simple docstring'''
UpperCamelCase = None
if token is not None:
UpperCamelCase = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
UpperCamelCase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
UpperCamelCase = requests.get(lowercase , headers=lowercase ).json()
UpperCamelCase = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
UpperCamelCase = math.ceil((result['total_count'] - 100) / 100 )
for i in range(lowercase ):
UpperCamelCase = requests.get(url + f"&page={i + 2}" , headers=lowercase ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def A ( lowercase , lowercase=None ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = None
if token is not None:
UpperCamelCase = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
UpperCamelCase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
UpperCamelCase = requests.get(lowercase , headers=lowercase ).json()
UpperCamelCase = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
UpperCamelCase = math.ceil((result['total_count'] - 100) / 100 )
for i in range(lowercase ):
UpperCamelCase = requests.get(url + f"&page={i + 2}" , headers=lowercase ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def A ( lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = None
if token is not None:
UpperCamelCase = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
UpperCamelCase = requests.get(lowercase , headers=lowercase , allow_redirects=lowercase )
UpperCamelCase = result.headers['Location']
UpperCamelCase = requests.get(lowercase , allow_redirects=lowercase )
UpperCamelCase = os.path.join(lowercase , f"{artifact_name}.zip" )
with open(lowercase , 'wb' ) as fp:
fp.write(response.content )
def A ( lowercase , lowercase=None ) -> Any:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = None
with zipfile.ZipFile(lowercase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(lowercase ) as f:
for line in f:
UpperCamelCase = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
UpperCamelCase = line[: line.index(': ' )]
UpperCamelCase = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
UpperCamelCase = line[len('FAILED ' ) :]
failed_tests.append(lowercase )
elif filename == "job_name.txt":
UpperCamelCase = line
if len(lowercase ) != len(lowercase ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(lowercase )} for `errors` "
f"and {len(lowercase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
' problem.' )
UpperCamelCase = None
if job_name and job_links:
UpperCamelCase = job_links.get(lowercase , lowercase )
# A list with elements of the form (line of error, error, failed test)
UpperCamelCase = [x + [y] + [job_link] for x, y in zip(lowercase , lowercase )]
return result
def A ( lowercase , lowercase=None ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = [os.path.join(lowercase , lowercase ) for p in os.listdir(lowercase ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(lowercase , job_links=lowercase ) )
return errors
def A ( lowercase , lowercase=None ) -> Any:
'''simple docstring'''
UpperCamelCase = Counter()
counter.update([x[1] for x in logs] )
UpperCamelCase = counter.most_common()
UpperCamelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
UpperCamelCase = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    UpperCamelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def A ( lowercase ) -> List[str]:
'''simple docstring'''
UpperCamelCase = test.split('::' )[0]
if test.startswith('tests/models/' ):
UpperCamelCase = test.split('/' )[2]
else:
UpperCamelCase = None
return test
def A ( lowercase , lowercase=None ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
UpperCamelCase = [x for x in logs if x[2] is not None]
UpperCamelCase = {x[2] for x in logs}
UpperCamelCase = {}
for test in tests:
UpperCamelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
UpperCamelCase = counter.most_common()
UpperCamelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
UpperCamelCase = sum(error_counts.values() )
if n_errors > 0:
UpperCamelCase = {'count': n_errors, 'errors': error_counts}
    UpperCamelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def A ( lowercase ) -> Any:
'''simple docstring'''
UpperCamelCase = '| no. | error | status |'
UpperCamelCase = '|-:|:-|:-|'
UpperCamelCase = [header, sep]
for error in reduced_by_error:
UpperCamelCase = reduced_by_error[error]['count']
UpperCamelCase = f"| {count} | {error[:100]} | |"
lines.append(lowercase )
return "\n".join(lowercase )
def A ( lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = '| model | no. of errors | major error | count |'
UpperCamelCase = '|-:|-:|-:|-:|'
UpperCamelCase = [header, sep]
for model in reduced_by_model:
UpperCamelCase = reduced_by_model[model]['count']
UpperCamelCase , UpperCamelCase = list(reduced_by_model[model]['errors'].items() )[0]
UpperCamelCase = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(lowercase )
return "\n".join(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_UpperCAmelCase : str = get_job_links(args.workflow_run_id, token=args.token)
_UpperCAmelCase : List[str] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_UpperCAmelCase : Optional[Any] = k.find(" / ")
_UpperCAmelCase : Union[str, Any] = k[index + len(" / ") :]
_UpperCAmelCase : int = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_UpperCAmelCase : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_UpperCAmelCase : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_UpperCAmelCase : str = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_UpperCAmelCase : Dict = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_UpperCAmelCase : str = reduce_by_error(errors)
_UpperCAmelCase : Union[str, Any] = reduce_by_model(errors)
_UpperCAmelCase : Tuple = make_github_table(reduced_by_error)
_UpperCAmelCase : int = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
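# Example invocation (illustrative; the run id and token are placeholders):
# python get_ci_error_statistics.py --workflow_run_id 123456789 \
#     --output_dir ./ci_artifacts --token $GITHUB_TOKEN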
| 712 |
def binary_recursive ( decimal ) -> str:
    '''simple docstring'''
    decimal = int(decimal )
    if decimal in (0, 1): # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def A ( number ) -> str:
    '''simple docstring'''
    number = str(number ).strip()
    if not number:
        raise ValueError('No input value was provided' )
    negative = '-' if number.startswith('-' ) else ''
    number = number.lstrip('-' )
    if not number.isnumeric():
        raise ValueError('Input value is not an integer' )
    return f'''{negative}0b{binary_recursive(int(number ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
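if __name__ == "__main__":
    # Added sanity checks (illustrative) for the converter above.
    assert A(11) == "0b1011"
    assert A("-7") == "-0b111"
    assert binary_recursive(2) == "10"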
| 3 | 0 |
from PIL import Image
def change_brightness ( img , level ) -> Image:
    '''simple docstring'''
    def brightness(c ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
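# Added check (illustrative): the transform is a pure per-channel offset, so a
# mid-gray pixel (value 128) maps to 128 + level.
def _brightness_example():
    gray = Image.new("L", (1, 1), color=128)
    assert change_brightness(gray, 50).getpixel((0, 0)) == 178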
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_UpperCAmelCase : Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
for attribute in key.split('.' ):
UpperCamelCase = getattr(lowercase , lowercase )
if weight_type is not None:
UpperCamelCase = getattr(lowercase , lowercase ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
elif weight_type == "running_mean":
UpperCamelCase = value
elif weight_type == "running_var":
UpperCamelCase = value
elif weight_type == "num_batches_tracked":
UpperCamelCase = value
elif weight_type == "inv_freq":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A ( lowercase , lowercase , lowercase ) -> Any:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(lowercase )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , lowercase )
if "pos_bias_u" in name:
UpperCamelCase = None
elif "pos_bias_v" in name:
UpperCamelCase = None
elif "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = 'weight'
elif "running_mean" in name:
UpperCamelCase = 'running_mean'
elif "inv_freq" in name:
UpperCamelCase = 'inv_freq'
elif "running_var" in name:
UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase = 'num_batches_tracked'
else:
UpperCamelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int:
'''simple docstring'''
if config_path is not None:
UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' )
else:
UpperCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCamelCase = 'rotary'
if is_finetuned:
if dict_path:
UpperCamelCase = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase = target_dict.pad_index
UpperCamelCase = target_dict.bos_index
UpperCamelCase = target_dict.eos_index
UpperCamelCase = len(target_dict.symbols )
UpperCamelCase = os.path.join(lowercase , 'vocab.json' )
if not os.path.isdir(lowercase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase = 0
UpperCamelCase = 1
with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowercase , lowercase )
UpperCamelCase = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , )
UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
UpperCamelCase = WavaVecaConformerForCTC(lowercase )
else:
UpperCamelCase = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase = fairseq.tasks.setup_task(lowercase )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
UpperCamelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase : Dict = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
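# Example invocation (illustrative; the script name and all paths below are
# placeholders):
# python convert_wav2vec2_conformer_checkpoint.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
#     --config_path ./config.json --not_finetuned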
| 3 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
UpperCamelCase = AutoTokenizer.from_pretrained('google/mt5-small' )
UpperCamelCase = tokenizer('Hello there' , return_tensors='np' ).input_ids
UpperCamelCase = tokenizer('Hi I am' , return_tensors='np' ).input_ids
UpperCamelCase = shift_tokens_right(A_ , model.config.pad_token_id , model.config.decoder_start_token_id )
UpperCamelCase = model(A_ , decoder_input_ids=A_ ).logits
UpperCamelCase = optax.softmax_cross_entropy(A_ , onehot(A_ , logits.shape[-1] ) ).mean()
UpperCamelCase = -(labels.shape[-1] * loss.item())
UpperCamelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
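# Added illustration (self-contained numpy re-implementation, for clarity) of
# the right-shift used above: decoder inputs are the labels shifted one step
# right, prefixed with the decoder start token (0 stands in for that id here).
def _shift_right_example():
    import numpy as np
    labels = np.array([[5, 6, 7]])
    shifted = np.concatenate([np.zeros((1, 1), dtype=labels.dtype), labels[:, :-1]], axis=1)
    assert (shifted == np.array([[0, 5, 6]])).all()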
| 714 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_UpperCAmelCase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_UpperCAmelCase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_UpperCAmelCase : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def A ( lowercase , lowercase ) -> List[str]:
'''simple docstring'''
return float((preds == labels).mean() )
def A ( lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = simple_accuracy(lowercase , lowercase )
UpperCamelCase = float(fa_score(y_true=lowercase , y_pred=lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = float(pearsonr(lowercase , lowercase )[0] )
UpperCamelCase = float(spearmanr(lowercase , lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def __UpperCamelCase ( self , A_ , A_ ) -> Any:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(A_ , A_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(A_ , A_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(A_ , A_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(A_ , A_ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 3 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
_UpperCAmelCase : int = random.Random()
def A ( lowercase , lowercase=1.0 , lowercase=None , lowercase=None ) -> List[Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase = global_rng
UpperCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=400 , A_=2_000 , A_=24 , A_=24 , A_=0.0 , A_=16_000 , A_=True , A_=True , ) -> int:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = min_seq_length
UpperCamelCase = max_seq_length
UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase = feature_size
UpperCamelCase = num_mel_bins
UpperCamelCase = padding_value
UpperCamelCase = sampling_rate
UpperCamelCase = return_attention_mask
UpperCamelCase = do_normalize
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase ( self , A_=False , A_=False ) -> Optional[int]:
"""simple docstring"""
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
UpperCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Optional[int] = SpeechaTextFeatureExtractor if is_speech_available() else None
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = SpeechaTextFeatureExtractionTester(self )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(A_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ , axis=0 ) - 1 ) < 1e-3 ) )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase = feature_extractor(A_ , padding=A_ , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
UpperCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
UpperCamelCase = feature_extractor(A_ , return_tensors='np' ).input_features
UpperCamelCase = feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase = np.asarray(A_ )
UpperCamelCase = feature_extractor(A_ , return_tensors='np' ).input_features
UpperCamelCase = feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase = [None, 16, None]
for max_length, padding in zip(A_ , A_ ):
UpperCamelCase = feature_extractor(
A_ , padding=A_ , max_length=A_ , return_attention_mask=A_ )
UpperCamelCase = inputs.input_features
UpperCamelCase = inputs.attention_mask
UpperCamelCase = [np.sum(A_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase = [None, 16, None]
for max_length, padding in zip(A_ , A_ ):
UpperCamelCase = feature_extractor(
A_ , max_length=A_ , padding=A_ , return_tensors='np' , return_attention_mask=A_ )
UpperCamelCase = inputs.input_features
UpperCamelCase = inputs.attention_mask
UpperCamelCase = [np.sum(A_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase = feature_extractor(
A_ , padding='max_length' , max_length=4 , truncation=A_ , return_tensors='np' , return_attention_mask=A_ , )
UpperCamelCase = inputs.input_features
UpperCamelCase = inputs.attention_mask
UpperCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase = feature_extractor(
A_ , padding='longest' , max_length=4 , truncation=A_ , return_tensors='np' , return_attention_mask=A_ , )
UpperCamelCase = inputs.input_features
UpperCamelCase = inputs.attention_mask
UpperCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCamelCase = feature_extractor(
A_ , padding='longest' , max_length=16 , truncation=A_ , return_tensors='np' , return_attention_mask=A_ , )
UpperCamelCase = inputs.input_features
UpperCamelCase = inputs.attention_mask
UpperCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
import torch
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = np.random.rand(100 , 32 ).astype(np.floataa )
UpperCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
UpperCamelCase = ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
        # fmt: off
        UpperCamelCase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
UpperCamelCase = self._load_datasamples(1 )
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) )
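# Added usage sketch (illustrative; requires torchaudio for the fbank kernel):
# extract log-mel features from one random waveform with the class under test.
def _speech_features_example():
    feature_extractor = SpeechaTextFeatureExtractor(
        feature_size=24, num_mel_bins=24, sampling_rate=16_000
    )
    waveform = np.random.randn(16_000).astype(np.float32)  # one second of audio
    features = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np").input_features
    assert features.shape[0] == 1 and features.shape[-1] == 24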
| 715 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_UpperCAmelCase : str = "scheduler_config.json"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Tuple = 1
__lowercase : int = 2
__lowercase : List[Any] = 3
__lowercase : str = 4
__lowercase : Optional[Any] = 5
@dataclass
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : jnp.ndarray
class lowercase :
__lowercase : Union[str, Any] = SCHEDULER_CONFIG_NAME
__lowercase : Dict = ["dtype"]
__lowercase : List[Any] = []
__lowercase : Dict = True
@classmethod
def __UpperCamelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , )
UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ):
UpperCamelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __UpperCamelCase ( self , A_ , A_ = False , **A_ ) -> str:
"""simple docstring"""
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def __UpperCamelCase ( cls ) -> int:
"""simple docstring"""
UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase = importlib.import_module(__name__.split('.' )[0] )
UpperCamelCase = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
def broadcast_to_shape_from_left(x, shape) -> jnp.ndarray:
    '''simple docstring'''
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    '''simple docstring'''
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
__lowercase : jnp.ndarray
@classmethod
def __UpperCamelCase ( cls , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = scheduler.config
if config.trained_betas is not None:
UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
UpperCamelCase = 1.0 - betas
UpperCamelCase = jnp.cumprod(A_ , axis=0 )
return cls(
alphas=A_ , betas=A_ , alphas_cumprod=A_ , )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    '''simple docstring'''
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    '''simple docstring'''
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
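# Illustrative usage sketch (not from the source): with a cosine ("squaredcos_cap_v2")
# schedule, betas grow with the timestep and the cumulative alpha product used by
# `get_sqrt_alpha_prod` decays towards zero.
if __name__ == "__main__":
    demo_betas = betas_for_alpha_bar(1_000)
    demo_alphas_cumprod = jnp.cumprod(1.0 - demo_betas, axis=0)
    assert float(demo_betas[0]) < float(demo_betas[-1])  # the final beta hits the 0.999 cap
    assert float(demo_alphas_cumprod[-1]) < 1e-4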
| 3 | 0 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
__lowercase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_ , A_=["v3", "v2", "v2"] , A_=512 , A_=5 , A_="<|endoftext|>" , **A_ , ) -> List[str]:
"""simple docstring"""
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
super().__init__(
unk_token=A_ , n_genres=A_ , version=A_ , max_n_lyric_tokens=A_ , **A_ , )
UpperCamelCase = version
UpperCamelCase = max_n_lyric_tokens
UpperCamelCase = n_genres
with open(A_ , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(A_ )
with open(A_ , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(A_ )
with open(A_ , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(A_ )
        oov = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the character vocabulary had n_vocab=80; v3 dropped '+', leaving n_vocab=79.
        if len(self.lyrics_encoder ) == 79:
            oov = oov.replace(r'\-\'' , r'\-+\'' )
        self.out_of_vocab = regex.compile(oov)
UpperCamelCase = {v: k for k, v in self.artists_encoder.items()}
UpperCamelCase = {v: k for k, v in self.genres_encoder.items()}
UpperCamelCase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = [self.artists_encoder.get(A_ , 0 ) for artist in list_artists]
for genres in range(len(A_ ) ):
UpperCamelCase = [self.genres_encoder.get(A_ , 0 ) for genre in list_genres[genres]]
UpperCamelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
UpperCamelCase = [[self.lyrics_encoder.get(A_ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
return list(A_ )
def __UpperCamelCase ( self , A_ , A_ , A_ , **A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.prepare_for_tokenization(A_ , A_ , A_ )
UpperCamelCase = self._tokenize(A_ )
return artist, genre, lyrics
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
UpperCamelCase = artists[idx].lower()
UpperCamelCase = [genres[idx].lower()]
else:
UpperCamelCase = self._normalize(artists[idx] ) + '.v2'
UpperCamelCase = [
self._normalize(A_ ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
UpperCamelCase = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
UpperCamelCase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
UpperCamelCase = {vocab[index]: index + 1 for index in range(len(A_ ) )}
UpperCamelCase = 0
UpperCamelCase = len(A_ ) + 1
UpperCamelCase = self.vocab
UpperCamelCase = {v: k for k, v in self.vocab.items()}
UpperCamelCase = ''
else:
UpperCamelCase = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
UpperCamelCase = self._run_strip_accents(A_ )
UpperCamelCase = lyrics.replace('\\' , '\n' )
UpperCamelCase = self.out_of_vocab.sub('' , A_ ), [], []
return artists, genres, lyrics
def __UpperCamelCase ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = unicodedata.normalize('NFD' , A_ )
UpperCamelCase = []
for char in text:
UpperCamelCase = unicodedata.category(A_ )
if cat == "Mn":
continue
output.append(A_ )
return "".join(A_ )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = (
[chr(A_ ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(A_ ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(A_ ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
UpperCamelCase = frozenset(A_ )
UpperCamelCase = re.compile(r'_+' )
UpperCamelCase = ''.join([c if c in accepted else '_' for c in text.lower()] )
UpperCamelCase = pattern.sub('_' , A_ ).strip('_' )
return text
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return " ".join(A_ )
def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> Optional[int]:
"""simple docstring"""
# Convert to TensorType
if not isinstance(A_ , A_ ):
UpperCamelCase = TensorType(A_ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
UpperCamelCase = tf.constant
UpperCamelCase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
UpperCamelCase = torch.tensor
UpperCamelCase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
UpperCamelCase = jnp.array
UpperCamelCase = _is_jax
else:
UpperCamelCase = np.asarray
UpperCamelCase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
UpperCamelCase = [inputs]
if not is_tensor(A_ ):
UpperCamelCase = as_tensor(A_ )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self , A_ , A_ , A_="" , A_="pt" ) -> BatchEncoding:
"""simple docstring"""
UpperCamelCase = [0, 0, 0]
UpperCamelCase = [artist] * len(self.version )
UpperCamelCase = [genres] * len(self.version )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.tokenize(A_ , A_ , A_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self._convert_token_to_id(A_ , A_ , A_ )
UpperCamelCase = [-INFINITY] * len(full_tokens[-1] )
UpperCamelCase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A_ )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A_ ) )
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A_ ) )
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A_ ) )
return (artists_file, genres_file, lyrics_file)
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = self.artists_decoder.get(A_ )
UpperCamelCase = [self.genres_decoder.get(A_ ) for genre in genres_index]
UpperCamelCase = [self.lyrics_decoder.get(A_ ) for character in lyric_index]
return artist, genres, lyrics
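# Illustrative, standalone re-statement of the `_normalize` rule above: lowercase the
# name, keep [a-z0-9.], map everything else to '_', then collapse runs of '_' and
# strip them from the ends. The helper name and expected output are assumptions.
def _demo_normalize(text: str) -> str:
    accepted = frozenset("abcdefghijklmnopqrstuvwxyz0123456789.")
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub(r"_+", "_", text).strip("_")
# _demo_normalize("The Beatles!") == "the_beatles"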
| 716 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
def __init__( self ) -> Optional[Any]:
"""simple docstring"""
# test for the above condition
self.test()
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = False
while not completed:
if counter == 1:
self.reset()
UpperCamelCase = self.advance()
if not self.does_advance(A_ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint):
def __init__( self , A_ ) -> Any:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
UpperCamelCase = token_ids
UpperCamelCase = len(self.token_ids )
UpperCamelCase = -1 # the index of the currently fulfilled step
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.fulfilled_idx += 1
UpperCamelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCamelCase = True
UpperCamelCase = completed
else:
# failed to make progress.
UpperCamelCase = True
self.reset()
return stepped, completed, reset
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = 0
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = PhrasalConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.fulfilled_idx
UpperCamelCase = self.completed
return new_constraint
class DisjunctiveTrie:
def __init__( self , A_ , A_=True ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = max([len(A_ ) for one in nested_token_ids] )
UpperCamelCase = {}
for token_ids in nested_token_ids:
UpperCamelCase = root
for tidx, token_id in enumerate(A_ ):
if token_id not in level:
UpperCamelCase = {}
UpperCamelCase = level[token_id]
if no_subsets and self.has_subsets(A_ , A_ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
UpperCamelCase = root
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.trie
for current_token in current_seq:
UpperCamelCase = start[current_token]
UpperCamelCase = list(start.keys() )
return next_tokens
def __UpperCamelCase ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.next_tokens(A_ )
return len(A_ ) == 0
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = list(root.values() )
if len(A_ ) == 0:
return 1
else:
return sum([self.count_leaves(A_ ) for nn in next_nodes] )
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.count_leaves(A_ )
return len(A_ ) != leaf_count
class DisjunctiveConstraint(Constraint):
def __init__( self , A_ ) -> str:
"""simple docstring"""
super(A_ , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
UpperCamelCase = DisjunctiveTrie(A_ )
UpperCamelCase = nested_token_ids
UpperCamelCase = self.trie.max_height
UpperCamelCase = []
UpperCamelCase = False
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.trie.next_tokens(self.current_seq )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.current_seq.append(A_ )
UpperCamelCase = True
else:
UpperCamelCase = True
self.reset()
UpperCamelCase = self.trie.reached_leaf(self.current_seq )
UpperCamelCase = completed
return stepped, completed, reset
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = []
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __UpperCamelCase ( self , A_=False ) -> int:
"""simple docstring"""
UpperCamelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.current_seq
UpperCamelCase = self.completed
return new_constraint
class ConstraintListState:
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = constraints
# max # of steps required to fulfill a given constraint
UpperCamelCase = max([c.seqlen for c in constraints] )
UpperCamelCase = len(A_ )
UpperCamelCase = False
self.init_state()
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = None
UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints]
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCamelCase = constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
else:
UpperCamelCase = self.inprogress_constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
if len(A_ ) == 0:
return None
else:
return token_list
def __UpperCamelCase ( self , A_ ) -> Any:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCamelCase , UpperCamelCase = self.add(A_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
UpperCamelCase , UpperCamelCase = False, False
if self.completed:
UpperCamelCase = True
UpperCamelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) )
UpperCamelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCamelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCamelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A_ ):
UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(A_ )
UpperCamelCase = None
if not complete and stepped:
UpperCamelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCamelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCamelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __UpperCamelCase ( self , A_=True ) -> Tuple:
"""simple docstring"""
        UpperCamelCase = ConstraintListState(self.constraints )  # we never mutate the self.constraints objects
        # throughout this process, so they are still in their initialization state.
if stateful:
UpperCamelCase = [
constraint.copy(stateful=A_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ )
UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
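# Hand-driven sketch of the phrasal constraint above (token ids are made up, and this
# assumes the attribute assignments in __init__ are restored to their upstream form):
# `advance` names the next required token and `update` steps through the phrase in order.
if __name__ == "__main__":
    constraint = PhrasalConstraint([5, 9, 3])
    assert constraint.advance() == 5  # next required token
    stepped, completed, reset = constraint.update(5)
    assert stepped and not completed
    assert constraint.advance() == 9  # the phrase must continue in order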
| 3 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    __lowercase : List[Any] = SpeechT5Tokenizer
__lowercase : Union[str, Any] = False
__lowercase : Any = True
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        UpperCamelCase = SpeechT5Tokenizer(SAMPLE_VOCAB )
UpperCamelCase = AddedToken('<mask>' , lstrip=A_ , rstrip=A_ )
UpperCamelCase = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = 'this is a test'
UpperCamelCase = 'this is a test'
return input_text, output_text
def __UpperCamelCase ( self , A_ , A_=False , A_=20 , A_=5 ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.get_input_output_texts(A_ )
UpperCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
UpperCamelCase = tokenizer.decode(A_ , clean_up_tokenization_spaces=A_ )
return text, ids
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = '<pad>'
UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(A_ ) , 81 )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(A_ )
self.assertNotEqual(A_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCamelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd']
UpperCamelCase = tokenizer.add_tokens(A_ )
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(A_ )
self.assertNotEqual(A_ , 0 )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , len(A_ ) )
self.assertEqual(A_ , all_size + len(A_ ) )
UpperCamelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=A_ )
self.assertGreaterEqual(len(A_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCamelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
UpperCamelCase = tokenizer.add_special_tokens(A_ )
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(A_ )
self.assertNotEqual(A_ , 0 )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , len(A_ ) )
self.assertEqual(A_ , all_size_a + len(A_ ) )
UpperCamelCase = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=A_ )
self.assertGreaterEqual(len(A_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(A_ , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
A_ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
# fmt: off
self.assertListEqual(A_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
UpperCamelCase = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=A_ , )
| 717 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self , A_ , A_ = None , A_ = None ) -> Any:
"""simple docstring"""
super().__init__()
UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase = torch.zeros(A_ , A_ )
else:
UpperCamelCase = None
UpperCamelCase = torch.nn.Parameter(A_ )
class VQDiffusionPipeline(DiffusionPipeline):
__lowercase : VQModel
__lowercase : CLIPTextModel
__lowercase : CLIPTokenizer
    __lowercase : Transformer2DModel
__lowercase : LearnedClassifierFreeSamplingEmbeddings
__lowercase : VQDiffusionScheduler
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
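# Toy numeric check of the truncation rule above (values are made up): classes are
# kept, most probable first, until their cumulative probability reaches the rate,
# and everything else is zeroed out (-inf in log space).
if __name__ == "__main__":
    log_p = torch.log(torch.tensor([[[0.6], [0.3], [0.1]]]))  # (batch, classes, pixels)
    sorted_lp, indices = torch.sort(log_p, 1, descending=True)
    keep = sorted_lp.exp().cumsum(dim=1) < 0.75
    keep = torch.cat((torch.full_like(keep[:, :1, :], True), keep), dim=1)[:, :-1, :]
    keep = keep.gather(1, indices.argsort(1))
    assert keep.squeeze().tolist() == [True, True, False]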
| 3 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"
    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {self.text_column: "text"}
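# Hypothetical usage sketch of the template above: `column_mapping` drives the column
# renaming performed by `Dataset.prepare_for_task` in the upstream `datasets` library.
if __name__ == "__main__":
    assert LanguageModeling(text_column="content").column_mapping == {"content": "text"}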
| 718 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    '''simple docstring'''
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    '''simple docstring'''
    encrypted = ''
    i = 0
    for letter in message:
        if letter == ' ':
            encrypted += ' '
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted
def original_text(cipher: str, key_new: str) -> str:
    '''simple docstring'''
    or_txt = ''
    i = 0
    for letter in cipher:
        if letter == ' ':
            or_txt += ' '
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
def main() -> None:
    '''simple docstring'''
    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f'Encrypted Text = {s}')
    print(f'Original Text = {original_text(s, key_new)}')
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
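# Round-trip sketch: decryption with the same generated key restores the plaintext,
# since (cipher + key) % 26 undoes (plain - key) % 26 letter by letter.
if __name__ == "__main__":
    demo_key = generate_key("THE GERMAN ATTACK", "SECRET")  # -> "SECRETSECRETSECRE"
    assert original_text(cipher_text("THE GERMAN ATTACK", demo_key), demo_key) == "THE GERMAN ATTACK"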
| 3 | 0 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    '''simple docstring'''
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items())
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            ) )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    '''simple docstring'''
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    '''simple docstring'''
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    '''simple docstring'''
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    '''simple docstring'''
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
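# Sanity sketch of the splitting rule above (hypothetical inputs): 10 shards over at
# most 3 jobs yields contiguous groups of sizes 4, 3 and 3, with the larger groups first.
if __name__ == "__main__":
    assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]
    gen_kwargs = {"files": [f"shard_{i}.txt" for i in range(10)]}
    assert len(_split_gen_kwargs(gen_kwargs, max_num_jobs=3)) == 3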
| 719 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    '''simple docstring'''
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1_000))
    import doctest
    doctest.testmod()
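# Worked result: f(1) = -6 and f(1000) > 0 bracket a sign change, so the loop above
# keeps halving the bracket; since (1000 - 1) / 2**34 < 1e-7, about 34 halvings pin
# down the root of x**3 - 2*x - 5 at approximately 2.0945515.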
| 3 | 0 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    '''simple docstring'''
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1_000))
    import doctest
    doctest.testmod()
| 720 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def parse_roman_numerals(numerals: str) -> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    '''simple docstring'''
    numerals = ''
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    '''simple docstring'''
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
    print(F'''{solution() = }''')
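# Worked example of the Euler 89 saving: "XIIII" is a valid but verbose form of 14,
# while the generator above emits the minimal form.
if __name__ == "__main__":
    assert parse_roman_numerals("XIIII") == 14
    assert generate_roman_numerals(14) == "XIV"  # two characters shorter than "XIIII"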
| 3 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class lowercase :
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , use_stable_embedding=A_ , )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = OpenLlamaModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , attention_mask=A_ )
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = OpenLlamaModel(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
UpperCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , )
UpperCamelCase = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = OpenLlamaForCausalLM(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = OpenLlamaForCausalLM(config=A_ )
model.to(A_ )
model.eval()
# first forward pass
UpperCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , )
UpperCamelCase = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0]
UpperCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) = config_and_inputs
UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Optional[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__lowercase : List[str] = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : Any = False
__lowercase : Dict = False
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = OpenLlamaModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , hidden_size=37 )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase = type
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = input_dict['input_ids']
UpperCamelCase = input_ids.ne(1 ).to(A_ )
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase = OpenLlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = 'single_label_classification'
UpperCamelCase = input_dict['input_ids']
UpperCamelCase = input_ids.ne(1 ).to(A_ )
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase = OpenLlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = 'multi_label_classification'
UpperCamelCase = input_dict['input_ids']
UpperCamelCase = input_ids.ne(1 ).to(A_ )
UpperCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase = OpenLlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def __UpperCamelCase ( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase = OpenLlamaModel(A_ )
original_model.to(A_ )
original_model.eval()
UpperCamelCase = original_model(A_ ).last_hidden_state
UpperCamelCase = original_model(A_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase = {'type': scaling_type, 'factor': 10.0}
UpperCamelCase = OpenLlamaModel(A_ )
scaled_model.to(A_ )
scaled_model.eval()
UpperCamelCase = scaled_model(A_ ).last_hidden_state
UpperCamelCase = scaled_model(A_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A_ , A_ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
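# Note on the RoPE-scaling check above (a summary, not code from this file): linear
# scaling alters the positional embeddings immediately, so even short-input outputs
# diverge, while dynamic scaling only kicks in once the input exceeds the original
# max_position_embeddings.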
| 721 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase )
UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCamelCase = dataset_size < in_memory_max_size
else:
UpperCamelCase = False
UpperCamelCase = is_small_dataset(lowercase )
assert result == expected
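# For reference, a minimal sketch of the predicate exercised above (an assumption:
# it mirrors datasets.utils.info_utils.is_small_dataset, where a max size of 0 or
# None disables the in-memory path).
def _is_small_dataset_sketch(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False  # unknown size or a disabled limit is never "small"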
| 3 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_A : List[str] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """attention_mask"""]
def __init__( self : str , UpperCamelCase_ : Any=80 , UpperCamelCase_ : Optional[int]=1_6000 , UpperCamelCase_ : List[str]=80 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Tuple=True , **UpperCamelCase_ : Any , ) -> Dict:
'''simple docstring'''
super().__init__(feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : List[str] = num_mel_bins
_lowercase : Dict = do_ceptral_normalize
_lowercase : str = normalize_means
_lowercase : List[Any] = normalize_vars
_lowercase : Tuple = True
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.ndarray , ) -> np.ndarray:
'''simple docstring'''
_lowercase : Any = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
_lowercase : str = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 )
_lowercase : Optional[Any] = ta_kaldi.fbank(UpperCamelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __UpperCAmelCase ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : Optional[bool] = True , UpperCamelCase_ : Optional[bool] = True , UpperCamelCase_ : float = 0.0 , ) -> np.ndarray:
'''simple docstring'''
if normalize_means:
_lowercase : Dict = x[:input_length].mean(axis=0 )
_lowercase : str = np.subtract(UpperCamelCase_ , UpperCamelCase_ )
if normalize_vars:
_lowercase : int = x[:input_length].std(axis=0 )
_lowercase : Tuple = np.divide(UpperCamelCase_ , UpperCamelCase_ )
if input_length < x.shape[0]:
_lowercase : Optional[Any] = padding_value
# make sure array is in float32
_lowercase : Any = x.astype(np.floataa )
return x
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[np.ndarray] , UpperCamelCase_ : Optional[np.ndarray] = None ) -> List[np.ndarray]:
'''simple docstring'''
_lowercase : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCamelCase_ , UpperCamelCase_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCamelCase_ , UpperCamelCase_ )
]
def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Tuple = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[Any] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Optional[int] = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Optional[Any] = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : Optional[Any] = [raw_speech]
# extract fbank features
_lowercase : str = [self._extract_fbank_features(UpperCamelCase_ ) for waveform in raw_speech]
# convert into correct format for padding
_lowercase : int = BatchFeature({'input_features': features} )
_lowercase : Optional[int] = self.pad(
UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
# make sure list is in array format
_lowercase : Dict = padded_inputs.get('input_features' )
if isinstance(input_features[0] , UpperCamelCase_ ):
_lowercase : Optional[Any] = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_features]
_lowercase : List[str] = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_lowercase : Optional[int] = [np.asarray(UpperCamelCase_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_lowercase : Union[str, Any] = (
np.array(UpperCamelCase_ , dtype=np.intaa )
if self._get_padding_strategies(UpperCamelCase_ , max_length=UpperCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_lowercase : List[str] = self.normalize(
padded_inputs['input_features'] , attention_mask=UpperCamelCase_ )
if return_tensors is not None:
_lowercase : Optional[int] = padded_inputs.convert_to_tensors(UpperCamelCase_ )
return padded_inputs
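# Hypothetical usage sketch (the instantiation below is an assumption for
# illustration; only the __call__ contract defined above is relied on):
# import numpy as np
# extractor = <this feature extractor>(sampling_rate=16000, num_mel_bins=80)
# one_second = np.zeros(16000, dtype=np.float32)
# batch = extractor(one_second, sampling_rate=16000, padding=True, return_tensors="np")
# batch["input_features"].shape  # -> (1, num_frames, 80) after fbank + utterance CMVN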
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
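# Usage note (a sketch of the intent, not code from this file): with _LazyModule,
# importing the package stays cheap and the torch-backed names in _import_structure
# are only resolved on first attribute access, e.g.
# `from transformers import ReformerModel` triggers the modeling import lazily.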
| 4 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A : Dict =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""pixel_values"""]
def __init__( self : Optional[int] , UpperCamelCase_ : bool = True , UpperCamelCase_ : int = 32 , UpperCamelCase_ : List[Any]=PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , **UpperCamelCase_ : List[Any] , ) -> None:
'''simple docstring'''
_lowercase : List[Any] = do_resize
_lowercase : List[Any] = do_rescale
_lowercase : Tuple = size_divisor
_lowercase : Tuple = resample
super().__init__(**UpperCamelCase_ )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[ChannelDimension] = None , **UpperCamelCase_ : Tuple ) -> np.ndarray:
'''simple docstring'''
_lowercase , _lowercase : Any = get_image_size(UpperCamelCase_ )
# Rounds the height and width down to the closest multiple of size_divisor
_lowercase : Union[str, Any] = height // size_divisor * size_divisor
_lowercase : Optional[Any] = width // size_divisor * size_divisor
_lowercase : Dict = resize(UpperCamelCase_ , (new_h, new_w) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
return image
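# Worked example (illustrative): with size_divisor=32, an input of height=97 and
# width=65 is resized to 97 // 32 * 32 = 96 by 65 // 32 * 32 = 64.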
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : float , UpperCamelCase_ : Optional[ChannelDimension] = None , **UpperCamelCase_ : Optional[int] ) -> np.ndarray:
'''simple docstring'''
return rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[Union[TensorType, str]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : Tuple , ) -> BatchFeature:
'''simple docstring'''
_lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize
_lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Any = size_divisor if size_divisor is not None else self.size_divisor
_lowercase : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
_lowercase : Optional[Any] = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(UpperCamelCase_ ) for img in images]
if do_resize:
_lowercase : str = [self.resize(UpperCamelCase_ , size_divisor=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
_lowercase : Optional[int] = [self.rescale(UpperCamelCase_ , scale=1 / 255 ) for image in images]
_lowercase : Optional[int] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
_lowercase : int = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 4 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
'''simple docstring'''
_lowercase : int = parent
_lowercase : str = batch_size
_lowercase : List[str] = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : str = num_choices
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_attention_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = True
A_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Tuple = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
_lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase : int = model(UpperCamelCase_ )[0]
_lowercase : Union[str, Any] = 5_0000
_lowercase : str = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4 | 1 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
_A : Tuple ='''bert-base-cased'''
_A : Optional[Any] ='''fp16'''
_A : Optional[Any] ='''bf16'''
_A : Tuple =[FPaa, BFaa]
@require_fsdp
@require_cuda
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
super().setUp()
_lowercase : Dict = dict(
ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(UpperCamelCase_ ):
_lowercase : Optional[int] = self.dist_env.copy()
_lowercase : str = F'''{i + 1}'''
_lowercase : List[str] = strategy
with mockenv_context(**UpperCamelCase_ ):
_lowercase : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
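# Mapping exercised above (an assumption based on torch's ShardingStrategy enum
# order): FSDP_SHARDING_STRATEGY[0] ("FULL_SHARD") -> ShardingStrategy(1),
# FSDP_SHARDING_STRATEGY[1] ("SHARD_GRAD_OP") -> ShardingStrategy(2), and so on,
# hence the i + 1 offset.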
def __UpperCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(UpperCamelCase_ ):
_lowercase : Tuple = self.dist_env.copy()
_lowercase : Optional[int] = prefetch_policy
with mockenv_context(**UpperCamelCase_ ):
_lowercase : Optional[int] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(UpperCamelCase_ ):
_lowercase : List[Any] = self.dist_env.copy()
_lowercase : Tuple = state_dict_type
with mockenv_context(**UpperCamelCase_ ):
_lowercase : Tuple = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Any = AutoModel.from_pretrained(UpperCamelCase_ )
for policy in FSDP_AUTO_WRAP_POLICY:
_lowercase : Dict = self.dist_env.copy()
_lowercase : int = policy
if policy == "TRANSFORMER_BASED_WRAP":
_lowercase : List[str] = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
_lowercase : List[str] = '2000'
with mockenv_context(**UpperCamelCase_ ):
_lowercase : List[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase_ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_lowercase : Optional[Any] = self.dist_env.copy()
_lowercase : Optional[int] = 'TRANSFORMER_BASED_WRAP'
_lowercase : int = 'T5Layer'
with mockenv_context(**UpperCamelCase_ ):
_lowercase : Tuple = FullyShardedDataParallelPlugin()
with self.assertRaises(UpperCamelCase_ ) as cm:
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase_ )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
_lowercase : Optional[Any] = self.dist_env.copy()
_lowercase : Optional[Any] = 'SIZE_BASED_WRAP'
_lowercase : Any = '0'
with mockenv_context(**UpperCamelCase_ ):
_lowercase : Optional[int] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_lowercase : Union[str, Any] = self.dist_env.copy()
_lowercase : Tuple = mp_dtype
with mockenv_context(**UpperCamelCase_ ):
_lowercase : Union[str, Any] = Accelerator()
if mp_dtype == "fp16":
_lowercase : int = torch.floataa
elif mp_dtype == "bf16":
_lowercase : int = torch.bfloataa
_lowercase : Tuple = MixedPrecision(param_dtype=UpperCamelCase_ , reduce_dtype=UpperCamelCase_ , buffer_dtype=UpperCamelCase_ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , UpperCamelCase_ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , UpperCamelCase_ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_lowercase : List[Any] = self.dist_env.copy()
_lowercase : Tuple = str(UpperCamelCase_ ).lower()
with mockenv_context(**UpperCamelCase_ ):
_lowercase : Optional[int] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=UpperCamelCase_ ) )
@require_fsdp
@require_multi_gpu
@slow
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = 0.82
_lowercase : str = [
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
_lowercase : str = {
'multi_gpu_fp16': 3200,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_lowercase : int = 160
_lowercase : Dict = 160
_lowercase : Union[str, Any] = inspect.getfile(accelerate.test_utils )
_lowercase : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = os.path.join(self.test_scripts_folder , 'test_performance.py' )
_lowercase : Any = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
_lowercase : Union[str, Any] = cmd.copy()
for i, strategy in enumerate(UpperCamelCase_ ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = os.path.join(self.test_scripts_folder , 'test_checkpointing.py' )
_lowercase : Dict = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
for i, strategy in enumerate(UpperCamelCase_ ):
_lowercase : Any = cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
_lowercase : Any = len(UpperCamelCase_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_lowercase : str = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
_lowercase : List[str] = cmd_config[:-1]
_lowercase : Dict = os.path.join(self.tmpdir , 'epoch_0' )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
def __UpperCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' )
_lowercase : str = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_lowercase : Any = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
for i, strategy in enumerate(UpperCamelCase_ ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
| 4 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
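# Shape note (derived from the code above): mel_fusion stacks the bilinearly shrunk
# global view plus the front/middle/back crops, i.e. an array of shape
# (4, chunk_frames, 64).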
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use `repeat` as a new possible value for padding: the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
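# Hypothetical usage sketch (instantiation and durations are assumptions for
# illustration; the defaults above are sampling_rate=48000 and max_length_s=10):
# import numpy as np
# fe = <this feature extractor>()
# twelve_seconds = np.zeros(48000 * 12, dtype=np.float32)
# out = fe(twelve_seconds, sampling_rate=48000, return_tensors="np")
# out["input_features"].shape  # (1, 4, chunk_frames, 64) under the default "fusion" truncation
# out["is_longer"]             # [[True]], because 12 s exceeds max_length_s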
| 4 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> float:
_lowercase : Union[str, Any] = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
_lowercase : List[str] = np.array(train_usr )
_lowercase : Dict = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x ) ), x.transpose() ), y )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] ) # beta0 + beta1 * date + beta2 * match
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> float:
_lowercase : List[str] = (1, 2, 1)
_lowercase : Dict = (1, 1, 0, 7)
_lowercase : Union[str, Any] = SARIMAX(
train_user, exog=train_match, order=order, seasonal_order=seasonal_order )
_lowercase : str = model.fit(disp=False, maxiter=600, method='nm' )
_lowercase : int = model_fit.predict(1, len(test_match ), exog=[test_match] )
return result[0]
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> float:
_lowercase : Any = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1 )
regressor.fit(x_train, train_user )
_lowercase : int = regressor.predict(x_test )
return y_pred[0]
def __UpperCamelCase ( _lowercase ) -> float:
train_user.sort()
_lowercase : str = np.percentile(_lowercase, 25 )
_lowercase : Union[str, Any] = np.percentile(_lowercase, 75 )
_lowercase : Optional[int] = qa - qa # iqr = q3 - q1 (difference of the 75th and 25th percentiles above)
_lowercase : Optional[Any] = qa - (iqr * 0.1) # low_lim = q1 - 0.1 * iqr
return low_lim
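# Worked example (illustrative; q1/q3 names reconstructed): with q1 = 10 and q3 = 30,
# iqr = 20 and the lower safety limit is q1 - 0.1 * iqr = 10 - 2 = 8.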
def __UpperCamelCase ( _lowercase, _lowercase ) -> bool:
_lowercase : Any = 0
_lowercase : List[str] = 0
for i in list_vote:
if i > actual_result:
_lowercase : Optional[Any] = not_safe + 1
else:
if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_A : int =[[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
_A : List[str] =pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
_A : Union[str, Any] =Normalizer().fit_transform(data_input_df.values)
# split data
_A : Any =normalize_df[:, 2].tolist()
_A : Dict =normalize_df[:, 0].tolist()
_A : List[Any] =normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_A : Optional[Any] =normalize_df[:, [1, 2]].tolist()
_A : str =x[: len(x) - 1]
_A : str =x[len(x) - 1 :]
# for linear regression & sarimax
_A : str =total_date[: len(total_date) - 1]
_A : Tuple =total_user[: len(total_user) - 1]
_A : Union[str, Any] =total_match[: len(total_match) - 1]
_A : Optional[Any] =total_date[len(total_date) - 1 :]
_A : Optional[int] =total_user[len(total_user) - 1 :]
_A : Optional[Any] =total_match[len(total_match) - 1 :]
# voting system with forecasting
_A : Dict =[
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
_A : Any ='''''' if data_safety_checker(res_vote, tst_user) else '''not '''
print(F'''Today\'s data is {not_str}safe.''')
| 4 |
'''simple docstring'''
from __future__ import annotations
import requests
def __UpperCamelCase ( _lowercase ) -> dict:
_lowercase : Optional[int] = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_lowercase ).json()
def __UpperCamelCase ( _lowercase = 10 ) -> list[dict]:
_lowercase : Union[str, Any] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
_lowercase : Optional[Any] = requests.get(_lowercase ).json()[:max_stories]
return [get_hackernews_story(_lowercase ) for story_id in story_ids]
def __UpperCamelCase ( _lowercase = 10 ) -> str:
_lowercase : Tuple = hackernews_top_stories(_lowercase )
return "\n".join('* [{title}]({url})'.format(**_lowercase ) for story in stories )
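# Example of one rendered markdown line (illustrative, not real data):
# * [Show HN: An example story](https://example.com/story)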
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4 | 1 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : Optional[Any] =logging.get_logger(__name__)
_A : int ={'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
_A : Tuple ={
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
_A : Optional[Any] ={
'''abeja/gpt-neox-japanese-2.7b''': 2_0_4_8,
}
def __UpperCamelCase ( _lowercase, _lowercase ) -> Dict:
with open(_lowercase, 'r', encoding='utf-8' ) as f:
_lowercase : Optional[Any] = json.loads(f.read() )
_lowercase : Union[str, Any] = collections.OrderedDict()
_lowercase : List[Any] = collections.OrderedDict()
_lowercase : str = collections.OrderedDict()
with open(_lowercase, 'r', encoding='utf-8' ) as f:
_lowercase : Dict = f.readlines()
_lowercase : str = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(_lowercase ):
_lowercase : Tuple = b
_lowercase : Any = idx
for wd in b:
_lowercase : Union[str, Any] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any]="<|endoftext|>" , UpperCamelCase_ : Union[str, Any]="<|endoftext|>" , UpperCamelCase_ : Dict="<|startoftext|>" , UpperCamelCase_ : str="<|endoftext|>" , UpperCamelCase_ : int=False , **UpperCamelCase_ : str , ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , do_clean_text=UpperCamelCase_ , **UpperCamelCase_ , )
if not os.path.isfile(UpperCamelCase_ ):
raise ValueError(
F'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(UpperCamelCase_ ):
raise ValueError(
F'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
_lowercase : Dict = do_clean_text
_lowercase , _lowercase , _lowercase , _lowercase : Dict = load_vocab_and_emoji(UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
return len(self.raw_vocab )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
return self.subword_tokenizer.tokenize(UpperCamelCase_ , clean=self.do_clean_text )
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Any ) -> str:
'''simple docstring'''
return self.vocab.get(UpperCamelCase_ , self.vocab.get(self.unk_token ) )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase_ )
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Any = ''.join(UpperCamelCase_ ).strip()
return out_string
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : "Conversation" ) -> List[int]:
'''simple docstring'''
_lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [self.eos_token_id] )
if len(UpperCamelCase_ ) > self.model_max_length:
_lowercase : List[Any] = input_ids[-self.model_max_length :]
return input_ids
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
_lowercase : Any = 0
if os.path.isdir(UpperCamelCase_ ):
_lowercase : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowercase : Tuple = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
_lowercase : Optional[int] = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
_lowercase : Any = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
_lowercase : Optional[int] = token_index
writer.write(','.join(UpperCamelCase_ ) + '\n' )
index += 1
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , UpperCamelCase_ )
return vocab_file, emoji_file
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ) -> Any:
'''simple docstring'''
_lowercase : int = vocab # same as swe
_lowercase : Optional[int] = ids_to_tokens # same as bpe
_lowercase : Optional[Any] = emoji
_lowercase : List[str] = np.max([len(UpperCamelCase_ ) for w in self.vocab.keys()] )
_lowercase : Union[str, Any] = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
_lowercase : Any = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
_lowercase : Tuple = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
_lowercase : Tuple = re.compile(
r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
_lowercase : List[str] = re.compile(
r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
_lowercase : Union[str, Any] = re.compile(
r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
_lowercase : List[Any] = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
_lowercase : List[str] = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
_lowercase : Dict = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : str ) -> str:
'''simple docstring'''
return len(self.ids_to_tokens )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
_lowercase : int = self.content_repattera.sub('<URL>' , UpperCamelCase_ )
_lowercase : int = self.content_repattera.sub('<EMAIL>' , UpperCamelCase_ )
_lowercase : int = self.content_repattera.sub('<TEL>' , UpperCamelCase_ )
_lowercase : Optional[int] = self.content_repattera.sub('<DATE>' , UpperCamelCase_ )
_lowercase : str = self.content_repattera.sub('<DATE>' , UpperCamelCase_ )
_lowercase : Any = self.content_repattera.sub('<PRICE>' , UpperCamelCase_ )
_lowercase : int = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_lowercase : Dict = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any=False ) -> List[str]:
'''simple docstring'''
_lowercase : Dict = text.replace(' ' , '<SP>' )
_lowercase : str = text.replace(' ' , '<SP>' )
_lowercase : Optional[Any] = text.replace('\r\n' , '<BR>' )
_lowercase : Any = text.replace('\n' , '<BR>' )
_lowercase : Optional[int] = text.replace('\r' , '<BR>' )
_lowercase : Dict = text.replace('\t' , '<TAB>' )
_lowercase : Union[str, Any] = text.replace('—' , 'ー' )
_lowercase : List[str] = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
_lowercase : List[str] = text.replace(UpperCamelCase_ , UpperCamelCase_ )
if clean:
_lowercase : Tuple = self.clean_text(UpperCamelCase_ )
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False
        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
_lowercase : Dict = 0
_lowercase : Optional[Any] = []
while pos < len(UpperCamelCase_ ):
_lowercase : Dict = min(len(UpperCamelCase_ ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
_lowercase : str = [] # (token_id, token, pos)
for e in range(UpperCamelCase_ , UpperCamelCase_ , -1 ):
_lowercase : Optional[int] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(UpperCamelCase_ ) > 2:
_lowercase : Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(UpperCamelCase_ ) > 0:
# the smallest token_id is adopted
_lowercase , _lowercase , _lowercase : Optional[int] = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[0] )[0]
result.append(UpperCamelCase_ )
_lowercase : Any = e
else:
_lowercase : str = pos + 1
_lowercase : Optional[Any] = text[pos:end]
if check_simbol(UpperCamelCase_ ):
result.append('<KIGOU>' )
elif checkuae(UpperCamelCase_ ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
_lowercase : Optional[int] = end
return result
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : List[str]="\n" ) -> List[str]:
'''simple docstring'''
_lowercase : str = []
_lowercase : Tuple = []
_lowercase : Union[str, Any] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase_ ) > 0:
words.append(bytearray(UpperCamelCase_ ).decode('utf-8' , errors='replace' ) )
_lowercase : Union[str, Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(UpperCamelCase_ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
words.append(bytearray(UpperCamelCase_ ).decode('utf-8' , errors='replace' ) )
_lowercase : Any = ''.join(UpperCamelCase_ )
return text
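The tokenize loop above does greedy substring lookup against the vocabulary with a byte-level fallback for anything unmatched. A minimal, self-contained sketch of the core idea follows; the toy vocabulary is made up, and unlike the real tokenizer, which gathers all matching candidates and adopts the smallest token id (with special tokens short-circuiting), this version simply takes the longest match.

# Minimal sketch of longest-match subword tokenization with byte fallback.
# The vocabulary below is illustrative; the real tokenizer loads vocab.txt / emoji.json.
def longest_match_tokenize(text: str, vocab: dict, maxlen: int) -> list:
    pos, result = 0, []
    while pos < len(text):
        end = min(len(text), pos + maxlen + 1)
        match = None
        for e in range(end, pos, -1):  # try the longest candidate first
            wd = text[pos:e]
            if wd in vocab:
                match = (wd, e)
                break
        if match is not None:
            result.append(match[0])
            pos = match[1]
        else:
            # byte fallback: emit one pseudo-token per UTF-8 byte
            for b in text[pos].encode("utf-8"):
                result.append("<|byte%d|>" % b)
            pos += 1
    return result

print(longest_match_tokenize("abcab", {"ab": 0, "c": 1}, maxlen=2))  # ['ab', 'c', 'ab']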
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """megatron-bert"""
def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Optional[Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : List[Any] = position_embedding_type
_lowercase : Optional[Any] = use_cache
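Assuming a recent transformers release, the public classes backed by this configuration file are MegatronBertConfig and MegatronBertModel. A small usage sketch; the hyperparameter values are arbitrary and chosen small so the randomly initialized model is cheap to build.

from transformers import MegatronBertConfig, MegatronBertModel

# Arbitrary small hyperparameters, for illustration only.
config = MegatronBertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4, intermediate_size=256)
model = MegatronBertModel(config)  # randomly initialized weights
print(config.to_dict()["hidden_size"])  # 128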
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_A : List[str] ={
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] =[
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
_A : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
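_LazyModule defers the heavy framework imports until an attribute is actually touched. Below is a simplified stand-in for that pattern, built on PEP 562 module-level __getattr__ rather than transformers' real implementation; the import structure is a toy example.

# Minimal sketch of lazy module loading via module-level __getattr__ (PEP 562).
# This is a simplified stand-in for transformers' _LazyModule, not its real code.
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")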
| 4 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
_lowercase : Tuple = args.pruning_method
_lowercase : int = args.threshold
_lowercase : str = args.model_name_or_path.rstrip('/' )
_lowercase : Dict = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
_lowercase : List[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Optional[int] = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_lowercase : List[str] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_lowercase : Dict = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
_lowercase : Optional[Any] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
_lowercase : str = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : str = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
_lowercase : Optional[int] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[int] = name[:-6]
_lowercase : List[str] = model[f'''{prefix_}mask_scores''']
_lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
_lowercase : str = torch.sigmoid(_lowercase )
_lowercase : int = s * (r - l) + l
_lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
_lowercase : Union[str, Any] = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : List[Any] = os.path.join(
os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )
if not os.path.isdir(_lowercase ):
shutil.copytree(_lowercase, _lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
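The magnitude branch above reduces to thresholding weights by absolute value. A minimal sketch of that single step; the threshold and shapes are illustrative, and the emmental MagnitudeBinarizer may differ in detail (for instance by keeping a top fraction of weights rather than using a fixed cutoff).

# Minimal sketch of magnitude pruning: keep weights whose |value| exceeds a
# threshold, zero out the rest. Threshold and tensor shape are illustrative.
import torch

tensor = torch.randn(4, 4)
threshold = 0.5
mask = (tensor.abs() > threshold).to(tensor.dtype)
pruned = tensor * mask
print(f"sparsity: {(pruned == 0).float().mean():.2f}")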
| 4 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
_A : Optional[int] =logging.getLogger(__name__)
def main():
_lowercase : List[Any] = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path', type=_lowercase, default='data/dump.txt', help='The path to the data.' )
parser.add_argument('--tokenizer_type', type=_lowercase, default='bert', choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name', type=_lowercase, default='bert-base-uncased', help='The tokenizer to use.' )
parser.add_argument('--dump_file', type=_lowercase, default='data/dump', help='The dump file prefix.' )
_lowercase : List[str] = parser.parse_args()
logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
_lowercase : Tuple = BertTokenizer.from_pretrained(args.tokenizer_name )
_lowercase : Tuple = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
_lowercase : Any = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
_lowercase : Tuple = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_lowercase : Optional[Any] = tokenizer.special_tokens_map['cls_token'] # `<s>`
_lowercase : Optional[int] = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
_lowercase : Tuple = GPTaTokenizer.from_pretrained(args.tokenizer_name )
_lowercase : Optional[Any] = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
_lowercase : List[str] = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f'''Loading text from {args.file_path}''' )
with open(args.file_path, 'r', encoding='utf8' ) as fp:
_lowercase : Optional[int] = fp.readlines()
logger.info('Start encoding' )
logger.info(f'''{len(_lowercase )} examples to process.''' )
_lowercase : Optional[int] = []
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = 1_0000
_lowercase : Tuple = time.time()
for text in data:
_lowercase : int = f'''{bos} {text.strip()} {sep}'''
_lowercase : Union[str, Any] = tokenizer.encode(_lowercase, add_special_tokens=_lowercase )
rslt.append(_lowercase )
iter += 1
if iter % interval == 0:
_lowercase : Tuple = time.time()
logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
_lowercase : Tuple = time.time()
logger.info('Finished binarization' )
logger.info(f'''{len(_lowercase )} examples processed.''' )
_lowercase : Dict = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
_lowercase : List[Any] = tokenizer.vocab_size
if vocab_size < (1 << 16):
_lowercase : Optional[int] = [np.uintaa(_lowercase ) for d in rslt]
else:
_lowercase : int = [np.intaa(_lowercase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'''Dump to {dp_file}''' )
with open(_lowercase, 'wb' ) as handle:
pickle.dump(rslt_, _lowercase, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
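The uint16-versus-int32 branch exists because a token id fits in two bytes exactly when the vocabulary has fewer than 2**16 entries, which halves the pickle size for most BERT-style vocabularies. A quick illustration; the vocab size shown is bert-base-uncased's.

# Token ids are stored in the smallest integer type that can hold them.
import numpy as np

vocab_size = 30522  # bert-base-uncased
ids = [101, 7592, 2088, 102]
dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
arr = np.array(ids, dtype=dtype)
print(arr.dtype, arr.nbytes)  # uint16 8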
| 4 |
'''simple docstring'''
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = ''.join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b'=' * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += '0' * (6 - len(binary_stream) % 6)
    else:
        padding = b''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        ''.join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters')
    padding = encoded_data.count('=')
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), 'Invalid base64 character(s) found.'
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), 'Invalid base64 character(s) found.'
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, 'Incorrect padding'
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
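Assuming the two functions above are importable, a quick round-trip check against the standard library:

# Round-trip check against the stdlib implementation.
import base64

payload = b"Hello, World!"
assert base64_encode(payload) == base64.b64encode(payload)
assert base64_decode(base64.b64encode(payload).decode()) == payload
print(base64_encode(payload))  # b'SGVsbG8sIFdvcmxkIQ=='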
| 4 | 1 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
_lowercase : Tuple = args.pruning_method
_lowercase : int = args.threshold
_lowercase : str = args.model_name_or_path.rstrip('/' )
_lowercase : Dict = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
_lowercase : List[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Optional[int] = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_lowercase : List[str] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_lowercase : Dict = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
_lowercase : Optional[Any] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
_lowercase : str = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : str = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
_lowercase : Optional[int] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[int] = name[:-6]
_lowercase : List[str] = model[f'''{prefix_}mask_scores''']
_lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
_lowercase : str = torch.sigmoid(_lowercase )
_lowercase : int = s * (r - l) + l
_lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
_lowercase : Union[str, Any] = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : List[Any] = os.path.join(
os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )
if not os.path.isdir(_lowercase ):
shutil.copytree(_lowercase, _lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
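For contrast with the magnitude sketch earlier, the topK branch ranks weights by a learned importance score and keeps a fixed fraction, as in movement pruning. A minimal sketch; the shapes and the 50% ratio are illustrative.

# Sketch of top-k pruning: keep the k highest-scoring weights, zero the rest.
import torch

scores = torch.randn(4, 4)     # stand-in for the learned mask_scores
weights = torch.randn(4, 4)
k = int(0.5 * scores.numel())  # keep 50% of the weights
flat = scores.flatten()
mask = torch.zeros_like(flat)
mask[flat.topk(k).indices] = 1.0
pruned = weights * mask.view_as(weights)
print(f"remaining weights: {int(mask.sum())}/{mask.numel()}")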
| 4 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]
def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])
def solution(limit: int = 1_0000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
    print(F'''{solution() = }''')
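A worked instance of the reverse-and-add rule: 47 becomes a palindrome after one step, so it is not counted; 196 is the classic candidate that never seems to converge within any tested bound.

# 47 + 74 = 121, which reads the same both ways.
n = 47
n = n + int(str(n)[::-1])
print(n, str(n) == str(n)[::-1])  # 121 True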
| 4 | 1 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCamelCase__ ( A , A ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase_ : int = 128 , UpperCamelCase_ : int = 256 , UpperCamelCase_ : float = 20_00.0 , UpperCamelCase_ : int = 768 , UpperCamelCase_ : int = 12 , UpperCamelCase_ : int = 12 , UpperCamelCase_ : int = 64 , UpperCamelCase_ : int = 2048 , UpperCamelCase_ : float = 0.1 , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
_lowercase : Tuple = nn.Sequential(
nn.Linear(UpperCamelCase_ , d_model * 4 , bias=UpperCamelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase_ ) , nn.SiLU() , )
_lowercase : int = nn.Embedding(UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
_lowercase : List[str] = nn.Dropout(p=UpperCamelCase_ )
_lowercase : int = nn.ModuleList()
for lyr_num in range(UpperCamelCase_ ):
# FiLM conditional T5 decoder
_lowercase : Union[str, Any] = DecoderLayer(d_model=UpperCamelCase_ , d_kv=UpperCamelCase_ , num_heads=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ )
self.decoders.append(UpperCamelCase_ )
_lowercase : Any = TaLayerNorm(UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(p=UpperCamelCase_ )
_lowercase : List[Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : str = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_lowercase , _lowercase , _lowercase : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowercase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowercase : Tuple = self.conditioning_emb(UpperCamelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowercase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowercase : Union[str, Any] = torch.broadcast_to(
torch.arange(UpperCamelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowercase : Any = self.position_encoding(UpperCamelCase_ )
_lowercase : List[Any] = self.continuous_inputs_projection(UpperCamelCase_ )
inputs += position_encodings
_lowercase : str = self.dropout(UpperCamelCase_ )
# decoder: No padding present.
_lowercase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowercase : List[str] = [(x, self.encoder_decoder_mask(UpperCamelCase_ , UpperCamelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowercase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowercase : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowercase : Union[str, Any] = lyr(
UpperCamelCase_ , conditioning_emb=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )[0]
_lowercase : Union[str, Any] = self.decoder_norm(UpperCamelCase_ )
_lowercase : str = self.post_dropout(UpperCamelCase_ )
_lowercase : Optional[int] = self.spec_out(UpperCamelCase_ )
return spec_out
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]=1E-6 ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase : Union[str, Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase_ , d_kv=UpperCamelCase_ , num_heads=UpperCamelCase_ , dropout_rate=UpperCamelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase_ , d_kv=UpperCamelCase_ , num_heads=UpperCamelCase_ , dropout_rate=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ ) )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str=None , ) -> Any:
'''simple docstring'''
_lowercase : int = self.layer[0](
UpperCamelCase_ , conditioning_emb=UpperCamelCase_ , attention_mask=UpperCamelCase_ , )
if encoder_hidden_states is not None:
_lowercase : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowercase : Optional[Any] = self.layer[1](
UpperCamelCase_ , key_value_states=UpperCamelCase_ , attention_mask=UpperCamelCase_ , )
# Apply Film Conditional Feed Forward layer
_lowercase : List[str] = self.layer[-1](UpperCamelCase_ , UpperCamelCase_ )
return (hidden_states,)
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
super().__init__()
_lowercase : Union[str, Any] = TaLayerNorm(UpperCamelCase_ )
_lowercase : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase_ )
_lowercase : str = Attention(query_dim=UpperCamelCase_ , heads=UpperCamelCase_ , dim_head=UpperCamelCase_ , out_bias=UpperCamelCase_ , scale_qk=UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(UpperCamelCase_ )
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = self.layer_norm(UpperCamelCase_ )
if conditioning_emb is not None:
_lowercase : int = self.FiLMLayer(UpperCamelCase_ , UpperCamelCase_ )
# Self-attention block
_lowercase : int = self.attention(UpperCamelCase_ )
_lowercase : List[Any] = hidden_states + self.dropout(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : List[str] ) -> Any:
'''simple docstring'''
super().__init__()
_lowercase : Tuple = Attention(query_dim=UpperCamelCase_ , heads=UpperCamelCase_ , dim_head=UpperCamelCase_ , out_bias=UpperCamelCase_ , scale_qk=UpperCamelCase_ )
_lowercase : List[str] = TaLayerNorm(UpperCamelCase_ , eps=UpperCamelCase_ )
_lowercase : Optional[Any] = nn.Dropout(UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any=None , UpperCamelCase_ : Union[str, Any]=None , ) -> List[Any]:
'''simple docstring'''
_lowercase : Tuple = self.layer_norm(UpperCamelCase_ )
_lowercase : Tuple = self.attention(
UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
_lowercase : Union[str, Any] = hidden_states + self.dropout(UpperCamelCase_ )
return layer_output
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().__init__()
_lowercase : Dict = TaDenseGatedActDense(d_model=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ )
_lowercase : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase_ )
_lowercase : Tuple = TaLayerNorm(UpperCamelCase_ , eps=UpperCamelCase_ )
_lowercase : Dict = nn.Dropout(UpperCamelCase_ )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Dict , UpperCamelCase_ : str=None ) -> Any:
'''simple docstring'''
_lowercase : List[str] = self.layer_norm(UpperCamelCase_ )
if conditioning_emb is not None:
_lowercase : int = self.film(UpperCamelCase_ , UpperCamelCase_ )
_lowercase : Tuple = self.DenseReluDense(UpperCamelCase_ )
_lowercase : List[Any] = hidden_states + self.dropout(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ) -> Optional[int]:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
_lowercase : Dict = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
_lowercase : Union[str, Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
_lowercase : Tuple = nn.Dropout(UpperCamelCase_ )
_lowercase : int = NewGELUActivation()
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Tuple = self.act(self.wi_a(UpperCamelCase_ ) )
_lowercase : List[Any] = self.wi_a(UpperCamelCase_ )
_lowercase : Tuple = hidden_gelu * hidden_linear
_lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ )
_lowercase : Optional[Any] = self.wo(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict=1E-6 ) -> Any:
'''simple docstring'''
super().__init__()
_lowercase : int = nn.Parameter(torch.ones(UpperCamelCase_ ) )
_lowercase : Optional[int] = eps
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
_lowercase : str = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase_ )
_lowercase : Optional[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowercase : List[Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __UpperCAmelCase ( self : int , UpperCamelCase_ : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(UpperCamelCase_ , 3.0 )) ))
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = nn.Linear(UpperCamelCase_ , out_features * 2 , bias=UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Any = self.scale_bias(UpperCamelCase_ )
_lowercase , _lowercase : Dict = torch.chunk(UpperCamelCase_ , 2 , -1 )
_lowercase : Dict = x * (1 + scale) + shift
return x
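The TaFiLMLayer above is feature-wise linear modulation: a conditioning vector is projected to a per-channel (scale, shift) pair and applied as x * (1 + scale) + shift. A minimal sketch with illustrative dimensions:

# Minimal FiLM conditioning sketch; dimensions are arbitrary.
import torch
from torch import nn

d_cond, d_model = 16, 8
film = nn.Linear(d_cond, d_model * 2, bias=False)

x = torch.randn(2, 10, d_model)   # (batch, seq, channels)
cond = torch.randn(2, 1, d_cond)  # conditioning embedding
scale, shift = film(cond).chunk(2, dim=-1)
out = x * (1 + scale) + shift     # broadcasts over the sequence dimension
print(out.shape)  # torch.Size([2, 10, 8])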
| 4 |
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def main(correct, fail=None):
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
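Each line of --correct_filename is expected to carry four semicolon-separated fields, matching line.split(';') above: file path, test class, test method, and the corrected source line. An illustrative record (the path and names are hypothetical):

# Hypothetical input record for --correct_filename, shown for format only.
example = "tests/test_modeling_bert.py;BertModelTest;test_forward;self.assertEqual(output.shape, expected_shape)\n"
file, class_name, test_name, correct_line = example.split(";")
print(file, class_name, test_name, sep=" | ")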
| 4 | 1 |
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    x = int(input('''Enter the number to be searched:\n'''))
    res = jump_search(arr, x)
    if res == -1:
        print('''Number not found!''')
    else:
        print(F'''Number {x} is at index {res}''')
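A quick sanity check; jump search assumes a sorted array and probes in blocks of sqrt(n), giving O(sqrt(n)) comparisons:

# Uses jump_search as defined above.
print(jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 55))  # 10
print(jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 4))   # -1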
| 4 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda x : -x[0] )
]
return result
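The usual way to reach this pipeline is through the high-level factory. A usage sketch; the CLIP checkpoint below is the common public example and is downloaded on first use, so swap in whatever model you have locally.

# Usage sketch via the high-level pipeline API.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
    hypothesis_template="This is a photo of {}.",
)
print(preds[0]["label"], round(preds[0]["score"], 3))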
| 4 | 1 |
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation
    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')
            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ('push(' + a + x + b + ')').ljust(12), ','.join(stack), sep=' | ')
    return int(stack[0])
if __name__ == "__main__":
    Postfix = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
    print('''\n\tResult = ''', solve(Postfix))
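A worked example: in postfix notation, "2 3 4 * +" evaluates as 2 + (3 * 4) = 14, since each operator consumes the two most recent stack entries.

# Uses solve as defined above; also prints the evaluation table.
print(solve("2 3 4 * +".split(" ")))  # 14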
| 4 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum):.1f}''')
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f'''{round(-1 * my_sec_sum):.1f}''')
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}''')
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[' ' + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest
    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)
if __name__ == "__main__":
    main()
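The quantity printed above is Shannon entropy, H = -sum(p * log2(p)) over the symbol probabilities p. A tiny worked check: a fair coin carries exactly one bit.

# Shannon entropy of a fair coin is 1 bit.
import math

probs = [0.5, 0.5]
h = -sum(p * math.log2(p) for p in probs)
print(h)  # 1.0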
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_lowercase : List[Any] = 'The dog is cute and lives in the garden house'
_lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] )
_lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_lowercase : Tuple = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state']
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
| 4 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_lowercase : List[Any] = 'The dog is cute and lives in the garden house'
_lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] )
_lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_lowercase : Tuple = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state']
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : int =logging.get_logger(__name__)
_A : Optional[int] ={
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """glpn"""
def __init__( self : Tuple , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : Any=[2, 2, 2, 2] , UpperCamelCase_ : int=[8, 4, 2, 1] , UpperCamelCase_ : int=[32, 64, 160, 256] , UpperCamelCase_ : Dict=[7, 3, 3, 3] , UpperCamelCase_ : Dict=[4, 2, 2, 2] , UpperCamelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCamelCase_ : Union[str, Any]=[4, 4, 4, 4] , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : str=0.02 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : str=1E-6 , UpperCamelCase_ : Union[str, Any]=64 , UpperCamelCase_ : Tuple=10 , UpperCamelCase_ : str=-1 , **UpperCamelCase_ : int , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Tuple = num_channels
_lowercase : List[Any] = num_encoder_blocks
_lowercase : List[str] = depths
_lowercase : Optional[Any] = sr_ratios
_lowercase : int = hidden_sizes
_lowercase : Union[str, Any] = patch_sizes
_lowercase : int = strides
_lowercase : Optional[int] = mlp_ratios
_lowercase : str = num_attention_heads
_lowercase : int = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : List[str] = initializer_range
_lowercase : Optional[Any] = drop_path_rate
_lowercase : int = layer_norm_eps
_lowercase : str = decoder_hidden_size
_lowercase : Optional[int] = max_depth
_lowercase : Optional[int] = head_in_index
| 4 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_vision_model"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Tuple = patch_size
_lowercase : Dict = image_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = attention_dropout
_lowercase : int = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : str = qkv_bias
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_qformer"""
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : List[str] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : int = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Optional[int] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
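# the two constants below are fixed initialization hyper-parameters; in the upstream
# InstructBlipConfig these appear to be the initializer factor (1.0) and initializer range (0.02)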
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
| 4 | 1 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. This is in contrast to taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which leads to ~3MB
# in total for all files; the latter is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
_A : Any ='''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
_A : List[Any] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
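# map each token to a consecutive integer id; the merges below are BPE merge rules
# (the trailing numbers mimic the merge counts found in a real fairseq merges file)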
_A : Any =dict(zip(vocab, range(len(vocab))))
_A : Optional[Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Optional[Any] =Path(tmpdirname)
_A : Union[str, Any] =build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
_A : Tuple =build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
_A : Union[str, Any] =build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
_A : List[str] =FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
_A : List[str] =FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
_A : Optional[Any] =FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
_A : Optional[int] =tokenizer(['''Making tiny model'''], return_tensors='''pt''')
_A : List[Any] =tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 4 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
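# ByT5 ids are raw utf-8 byte values offset by the 3 special tokens (0=pad, 1=eos, 2=unk):
# 'U' = 85 -> 88, ' ' = 32 -> 35, and '€' becomes its three utf-8 bytes 226/130/172 -> 229/133/175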
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_A : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
_A : int ='''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
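# helper: map the requested output size to the latent grid size, rounding up to a multiple of
# scale_factor**2; e.g. height=768 with scale_factor=8 -> 768 // 64 = 12 -> 12 * 8 = 96 latents
# (the movq decoder then upscales by a factor of 8 back to 768)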
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase=8 ) -> List[Any]:
_lowercase : str = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowercase : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : UNetaDConditionModel , UpperCamelCase_ : DDPMScheduler , UpperCamelCase_ : VQModel , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
_lowercase : Any = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
if latents is None:
_lowercase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_lowercase : Dict = latents.to(UpperCamelCase_ )
_lowercase : Optional[Any] = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Union[str, Any]=0 ) -> Dict:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_lowercase : List[str] = torch.device(F'''cuda:{gpu_id}''' )
_lowercase : Optional[int] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : List[str]=0 ) -> Union[str, Any]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
_lowercase : int = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowercase : Union[str, Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowercase , _lowercase : Optional[int] = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
_lowercase : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self : Dict , UpperCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 100 , UpperCamelCase_ : float = 4.0 , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[torch.FloatTensor] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = self._execution_device
_lowercase : List[str] = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Optional[int] = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Optional[Any] = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Tuple = torch.cat(UpperCamelCase_ , dim=0 )
_lowercase : Dict = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_lowercase : Optional[int] = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
_lowercase : List[str] = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
_lowercase : int = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
_lowercase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
_lowercase : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
_lowercase : Optional[Any] = self.scheduler.timesteps
_lowercase : List[Any] = self.movq.config.latent_channels
_lowercase , _lowercase : List[str] = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
_lowercase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_lowercase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowercase : Dict = {'image_embeds': image_embeds, 'hint': hint}
_lowercase : str = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
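# the unet here may predict a learned variance alongside the noise, so its output has twice the
# latent channels: split it so classifier-free guidance is applied to the noise prediction only,
# then re-attach the variance from the text branch for the scheduler step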
if do_classifier_free_guidance:
_lowercase , _lowercase : str = noise_pred.split(latents.shape[1] , dim=1 )
_lowercase , _lowercase : Optional[int] = noise_pred.chunk(2 )
_lowercase , _lowercase : Dict = variance_pred.chunk(2 )
_lowercase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowercase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowercase , _lowercase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowercase : int = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
_lowercase : Tuple = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_lowercase : Any = image * 0.5 + 0.5
_lowercase : Optional[int] = image.clamp(0 , 1 )
_lowercase : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowercase : Any = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 4 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 | 1 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __UpperCamelCase ( ) -> Optional[int]:
_lowercase : int = argparse.ArgumentParser()
parser.add_argument(
'-m', '--pretrained_model_name_or_path', type=_lowercase, default=_lowercase, required=_lowercase, help='Path to pretrained model or model identifier from huggingface.co/models.', )
parser.add_argument(
'-c', '--caption', type=_lowercase, default='robotic cat with wings', help='Text used to generate images.', )
parser.add_argument(
'-n', '--images_num', type=_lowercase, default=4, help='How much images to generate.', )
parser.add_argument(
'-s', '--seed', type=_lowercase, default=42, help='Seed for random process.', )
parser.add_argument(
'-ci', '--cuda_id', type=_lowercase, default=0, help='cuda_id.', )
_lowercase : Union[str, Any] = parser.parse_args()
return args
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Dict:
if not len(_lowercase ) == rows * cols:
raise ValueError('The specified number of rows and columns does not match the number of images.' )
_lowercase , _lowercase : Dict = imgs[0].size
_lowercase : Dict = Image.new('RGB', size=(cols * w, rows * h) )
_lowercase , _lowercase : Union[str, Any] = grid.size
for i, img in enumerate(_lowercase ):
grid.paste(_lowercase, box=(i % cols * w, i // cols * h) )
return grid
def __UpperCamelCase ( _lowercase, _lowercase="robotic cat with wings", _lowercase=7.5, _lowercase=50, _lowercase=1, _lowercase=42, ) -> Optional[Any]:
_lowercase : str = torch.Generator(pipeline.device ).manual_seed(_lowercase )
_lowercase : Tuple = pipeline(
_lowercase, guidance_scale=_lowercase, num_inference_steps=_lowercase, generator=_lowercase, num_images_per_prompt=_lowercase, ).images
_lowercase : Any = int(math.sqrt(_lowercase ) )
_lowercase : List[str] = image_grid(_lowercase, rows=_rows, cols=num_images_per_prompt // _rows )
return grid, images
_A : List[Any] =parse_args()
# Load models and create wrapper for stable diffusion
_A : Optional[Any] =CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
_A : Any =CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
_A : Union[str, Any] =AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
_A : str =UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
_A : List[str] =StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
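# the lambda below stubs out the pipeline's safety checker: images are returned unchanged
# and nothing is flagged as NSFW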
_A : Optional[Any] =lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
_A : Dict =load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
_A : Tuple =unet.to(torch.device('''cuda''', args.cuda_id))
_A : Optional[int] =pipeline.to(unet.device)
_A , _A : Optional[int] =generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
_A : Union[str, Any] =os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 4 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
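# entropy of softmax(x) computed in closed form: with A = sum_i exp(x_i) and B = sum_i x_i * exp(x_i),
# H = -sum_i p_i * log(p_i) = log(A) - B / A, where p_i = exp(x_i) / A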
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : int = torch.exp(_lowercase )
_lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i)
_lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowercase ) - B / A
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_lowercase : int = config.output_attentions
_lowercase : int = config.output_hidden_states
_lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int:
'''simple docstring'''
if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowercase : Optional[Any] = x
else:
_lowercase : Optional[int] = x
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = ()
_lowercase : List[Any] = ()
_lowercase : Tuple = ()
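# each layer gets a "highway" exit head; at inference time, if the entropy of a layer's exit
# logits drops below that layer's threshold, a HighwayException is raised to abort the forward
# pass early (the classifier's forward catches it and uses the early-exit logits)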
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowercase : Optional[int] = all_hidden_states + (hidden_states,)
_lowercase : str = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = layer_outputs[0]
if self.output_attentions:
_lowercase : Tuple = all_attentions + (layer_outputs[1],)
_lowercase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowercase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[int] = current_outputs + (all_attentions,)
_lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
_lowercase : Dict = highway_exit[0]
_lowercase : Tuple = entropy(UpperCamelCase_ )
_lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
_lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase : str = all_hidden_states + (hidden_states,)
_lowercase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
_lowercase : Dict = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[Any] = outputs + (all_attentions,)
_lowercase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : int = config
_lowercase : int = BertEmbeddings(UpperCamelCase_ )
_lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
_lowercase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.embeddings.word_embeddings
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = value
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowercase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
_lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
_lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase : int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase : int = encoder_attention_mask[:, None, None, :]
_lowercase : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
_lowercase : Dict = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
_lowercase : List[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : int = encoder_outputs[0]
_lowercase : str = self.pooler(UpperCamelCase_ )
_lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = message
_lowercase : Dict = exit_layer # start from 1!
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = BertPooler(UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowercase : int = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : str = encoder_outputs[0]
_lowercase : int = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
_lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowercase : Dict = bmodel_output[1]
_lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ )
_lowercase : str = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : Dict = config.num_labels
_lowercase : Any = config.num_hidden_layers
_lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ )
_lowercase : Any = nn.Dropout(config.hidden_dropout_prob )
_lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = self.num_layers
try:
_lowercase : Tuple = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowercase : List[Any] = outputs[1]
_lowercase : int = self.dropout(UpperCamelCase_ )
_lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
_lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowercase : Union[str, Any] = e.message
_lowercase : Any = e.exit_layer
_lowercase : Optional[int] = outputs[0]
if not self.training:
_lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
_lowercase : Tuple = []
_lowercase : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowercase : Tuple = MSELoss()
_lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Union[str, Any] = CrossEntropyLoss()
_lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowercase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowercase : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowercase : Union[str, Any] = MSELoss()
_lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Dict = CrossEntropyLoss()
_lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
if train_highway:
_lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowercase : Optional[Any] = (loss,) + outputs
if not self.training:
_lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowercase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 1 |
'''simple docstring'''
_A : Dict ={
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
1_0: '''a''',
1_1: '''b''',
1_2: '''c''',
1_3: '''d''',
1_4: '''e''',
1_5: '''f''',
}
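# convert a decimal integer to its hexadecimal string, e.g. 255 -> '0xff' and -42 -> '-0x2a'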
def __UpperCamelCase ( _lowercase ) -> str:
assert type(_lowercase ) in (int, float) and decimal == int(_lowercase )
_lowercase : int = int(_lowercase )
_lowercase : Dict = ''
_lowercase : Optional[int] = False
if decimal < 0:
_lowercase : Optional[int] = True
decimal *= -1
while decimal > 0:
_lowercase , _lowercase : str = divmod(_lowercase, 16 )
_lowercase : List[str] = values[remainder] + hexadecimal
_lowercase : Optional[Any] = '0x' + hexadecimal
if negative:
_lowercase : Optional[Any] = '-' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = [10, 20, 30, 40, 50, 60]
_lowercase : Tuple = [2, 4, 6, 8, 10, 12]
_lowercase : Optional[Any] = 100
self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Weight can not be negative.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Profit can not be negative.' )
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(
UpperCamelCase_ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 4 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A : Dict =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""pixel_values"""]
def __init__( self : Optional[int] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Dict[str, int]] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 255 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , **UpperCamelCase_ : List[Any] , ) -> None:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Any = size if size is not None else {'shortest_edge': 256}
_lowercase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
_lowercase : Optional[Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[str] = resample
_lowercase : List[str] = do_center_crop
_lowercase : List[Any] = crop_size
_lowercase : Union[str, Any] = do_rescale
_lowercase : List[str] = rescale_factor
_lowercase : Dict = do_normalize
_lowercase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCAmelCase ( self : int , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Any , ) -> np.ndarray:
'''simple docstring'''
_lowercase : str = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_lowercase : str = get_resize_output_image_size(UpperCamelCase_ , size=size['shortest_edge'] , default_to_square=UpperCamelCase_ )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_lowercase : Any = get_size_dict(UpperCamelCase_ )
return center_crop(UpperCamelCase_ , size=(size['height'], size['width']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : float , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Union[str, Any] ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Dict , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : ImageInput , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[float] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_ : Union[str, Any] , ) -> Dict:
'''simple docstring'''
_lowercase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_lowercase : Optional[Any] = size if size is not None else self.size
_lowercase : Optional[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
_lowercase : Dict = resample if resample is not None else self.resample
_lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
_lowercase : int = get_size_dict(UpperCamelCase_ )
_lowercase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Tuple = image_mean if image_mean is not None else self.image_mean
_lowercase : str = image_std if image_std is not None else self.image_std
_lowercase : Optional[int] = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowercase : Dict = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
_lowercase : Optional[int] = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_center_crop:
_lowercase : str = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
if do_rescale:
_lowercase : Tuple = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
_lowercase : List[str] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
_lowercase : int = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
_lowercase : Any = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
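# the import structure is filled in conditionally below so that missing optional dependencies
# (sentencepiece, tokenizers, torch, tf) do not break importing the package; the actual modules
# are only loaded lazily via _LazyModule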
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
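# For intuition, a minimal sketch of the lazy-import idea used above. This is NOT the
# transformers `_LazyModule` implementation; class and attribute names are illustrative.
import importlib
import types


class _NaiveLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Import the owning submodule only on first attribute access, then cache the value.
        if attr not in self._name_to_submodule:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)
        return value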
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in nums):
        raise ValueError('All values must be greater than 0')
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
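# Added illustrative checks (assumed behavior): a side list forms a valid polygon only
# when the longest side is strictly shorter than the sum of the remaining sides.
if __name__ == "__main__":
    assert check_polygon([6, 10, 5])  # 10 < 6 + 5
    assert not check_polygon([3, 7, 13])  # 13 >= 3 + 7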
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0,
        eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50,
        position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
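# Quick sanity check (illustrative; relies on the defaults reconstructed above):
# config = MarkupLMConfig(hidden_size=256, num_hidden_layers=4)
# assert config.model_type == 'markuplm' and config.max_depth == 50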
| 4 | 1 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching, 'os.path.join', mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, 'open', mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching, 'pandas.read_csv', mock):
pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, 'len', None) is None
    with patch_submodule(_test_patching, 'len', mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching, 'open', mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, 'os.path.join', mock_join):
        with patch_submodule(_test_patching, 'os.rename', mock_rename):
            with patch_submodule(_test_patching, 'os.path.dirname', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, 'os.rename', mock_rename):
        with patch_submodule(_test_patching, 'os.path.join', mock_join):
            with patch_submodule(_test_patching, 'os.path.dirname', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching, '__module_that_doesn_exist__.__attribute_that_doesn_exist__', mock):
        pass
    with patch_submodule(_test_patching, 'os.__attribute_that_doesn_exist__', mock):
pass
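# For context, a minimal sketch of the idea exercised by these tests. This is NOT the
# `datasets.utils.patching.patch_submodule` implementation; it only shows the
# set-attribute / restore-on-exit contract for a plain (non-dotted) attribute.
import contextlib


@contextlib.contextmanager
def naive_patch(module, attr, new_value):
    old_value = getattr(module, attr)
    setattr(module, attr, new_value)
    try:
        yield
    finally:
        # Always restore the original attribute, even if the body raised.
        setattr(module, attr, old_value)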
| 4 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution')
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val
    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys))
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
    outputs = model(pixel_values)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 512, 512] )
_lowercase : Tuple = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
_lowercase : int = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] )
_lowercase : Dict = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : List[str] = torch.Size([1, 3, 512, 512] )
_lowercase : int = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 1024, 1024] )
_lowercase : Union[str, Any] = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
print('Looks ok!' )
    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
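# Example invocation (illustrative; the script filename and output folder are assumptions):
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64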
| 4 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 4 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ')
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(' '.join(line) + (remaining_spaces + 1) * ' ')
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
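# Added illustrative usage; the expected output below is the assumed behavior of the
# round-robin space distribution implemented above (each line is padded to width 16).
if __name__ == "__main__":
    assert text_justification("This is an example of text justification.", 16) == [
        'This    is    an',
        'example  of text',
        'justification.  ',
    ]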
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 4 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(_lowercase ):
_lowercase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowercase, _lowercase ).lstrip('./' )
def __UpperCamelCase ( _lowercase ) -> List[str]:
return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_lowercase )} {new_part.replace("_", " " ).title()}''' )
return new_path
def __UpperCamelCase ( _lowercase = "." ) -> None:
_lowercase : Dict = ''
for filepath in sorted(good_file_paths(_lowercase ) ):
_lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase )
if filepath != old_path:
_lowercase : Dict = print_path(_lowercase, _lowercase )
_lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
_lowercase : Dict = f'''{filepath}/{filename}'''.replace(' ', '%20' )
_lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
print(f'''{md_prefix(_lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
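# Assumed behavior of md_prefix for reference: md_prefix(0) -> '\n##' (a new section
# heading) and md_prefix(2) -> '    *' (a bullet nested two levels deep).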
| 4 | 1 |
'''simple docstring'''
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
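# Note (added): tree sort averages O(n log n), but an already-sorted input degrades the
# unbalanced BST into a linked list and the sort into O(n^2).
if __name__ == "__main__":
    assert tree_sort([3, 1, 2]) == [1, 2, 3]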
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu',
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 4 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu',
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained('bert-base-cased')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 4 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_features', 'is_longer']
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='htk')
        self.mel_filters_slaney = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney')
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(waveform, window_function(self.fft_window_size, 'hann'), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='dB')
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode='bilinear', align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
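# Illustrative usage sketch (the 3-second random signal is an assumption for the demo):
# import numpy as np
# feature_extractor = ClapFeatureExtractor()
# audio = np.random.randn(48_000 * 3)  # fake mono audio at the default 48 kHz rate
# features = feature_extractor(audio, sampling_rate=48_000, return_tensors='np')
# print(features['input_features'].shape, features['is_longer'])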
| 4 | 1 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit

    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f'{func}(grid=grid)', setup=setup, number=500)
        print(f'{func}() took {time:0.4f} seconds')
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
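# Added illustrative check (assumed behavior): in the reverse-sorted row [4, 3, 2, -1]
# the first negative value sits at index 3, so find_negative_index([4, 3, 2, -1]) == 3
# and count_negatives_binary_search([[4, 3, 2, -1], [1, 1, -1, -2]]) == 3.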
| 4 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join('* [{title}]({url})'.format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
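# Illustrative output shape (titles and URLs below are placeholders, not real data):
# * [Example story title](https://news.ycombinator.com/item?id=123)
# * [Another story title](https://example.com/post)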
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(self, vocab_size=29_056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
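# Quick sanity check (illustrative; relies on the defaults reconstructed above):
# config = MegatronBertConfig(num_hidden_layers=2)
# assert config.model_type == 'megatron-bert' and config.hidden_size == 1024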
| 4 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=A )
class lowerCamelCase__ :
'''simple docstring'''
A_ = 42
A_ = None
A_ = None
A_ = None
A_ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = 42
def __init__( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Dict=False , UpperCamelCase_ : bool = False , ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[Any] = hans_processors[task]()
_lowercase : Dict = os.path.join(
UpperCamelCase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(UpperCamelCase_ ) , UpperCamelCase_ , ) , )
_lowercase : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_lowercase , _lowercase : Optional[Any] = label_list[2], label_list[1]
_lowercase : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowercase : Optional[int] = cached_features_file + '.lock'
with FileLock(UpperCamelCase_ ):
if os.path.exists(UpperCamelCase_ ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
_lowercase : List[Any] = torch.load(UpperCamelCase_ )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
_lowercase : Optional[int] = (
processor.get_dev_examples(UpperCamelCase_ ) if evaluate else processor.get_train_examples(UpperCamelCase_ )
)
logger.info('Training examples: %s' , len(UpperCamelCase_ ) )
_lowercase : Optional[int] = hans_convert_examples_to_features(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
logger.info('Saving features into cached file %s' , UpperCamelCase_ )
torch.save(self.features , UpperCamelCase_ )
def __len__( self : Tuple ) -> int:
'''simple docstring'''
return len(self.features )
def __getitem__( self : List[Any] , UpperCamelCase_ : Tuple ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCamelCase__ :
'''simple docstring'''
A_ = 42
def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] = 128 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : bool = False , ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = hans_processors[task]()
_lowercase : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_lowercase , _lowercase : Any = label_list[2], label_list[1]
_lowercase : List[Any] = label_list
_lowercase : List[str] = processor.get_dev_examples(UpperCamelCase_ ) if evaluate else processor.get_train_examples(UpperCamelCase_ )
_lowercase : int = hans_convert_examples_to_features(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(UpperCamelCase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
_lowercase : str = tf.data.Dataset.from_generator(
UpperCamelCase_ , (
{
                    'example_id': tf.int32,
                    'input_ids': tf.int32,
                    'attention_mask': tf.int32,
                    'token_type_ids': tf.int32,
                },
                tf.int64,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.dataset
def __len__( self : str ) -> Optional[int]:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict , UpperCamelCase_ : Union[str, Any] ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return self.label_list
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : int ) -> Tuple:
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Dict ) -> List[str]:
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def __UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ) -> Tuple:
'''simple docstring'''
_lowercase : List[str] = []
for i, line in enumerate(UpperCamelCase_ ):
if i == 0:
continue
_lowercase : Tuple = '%s-%s' % (set_type, line[0])
_lowercase : Dict = line[5]
_lowercase : Union[str, Any] = line[6]
_lowercase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
_lowercase : Tuple = line[0]
examples.append(InputExample(guid=UpperCamelCase_ , text_a=UpperCamelCase_ , text_b=UpperCamelCase_ , label=UpperCamelCase_ , pairID=UpperCamelCase_ ) )
return examples
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, ) -> Any:
_lowercase : Tuple = {label: i for i, label in enumerate(_lowercase )}
_lowercase : int = []
for ex_index, example in tqdm.tqdm(enumerate(_lowercase ), desc='convert examples to features' ):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d' % (ex_index) )
_lowercase : Optional[Any] = tokenizer(
example.text_a, example.text_b, add_special_tokens=_lowercase, max_length=_lowercase, padding='max_length', truncation=_lowercase, return_overflowing_tokens=_lowercase, )
_lowercase : Optional[Any] = label_map[example.label] if example.label in label_map else 0
_lowercase : Dict = int(example.pairID )
features.append(InputFeatures(**_lowercase, label=_lowercase, pairID=_lowercase ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(f'''guid: {example}''' )
logger.info(f'''features: {features[i]}''' )
return features
_A : List[str] ={
'''hans''': 3,
}
_A : List[str] ={
'''hans''': HansProcessor,
}
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCamelCase ( _lowercase ) -> List[Any]:
_lowercase : Tuple = args.pruning_method
_lowercase : int = args.threshold
_lowercase : str = args.model_name_or_path.rstrip('/' )
_lowercase : Dict = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
_lowercase : List[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Optional[int] = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_lowercase : List[str] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_lowercase : Dict = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
_lowercase : Optional[Any] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
_lowercase : str = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : str = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
_lowercase : Optional[int] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[int] = name[:-6]
_lowercase : List[str] = model[f'''{prefix_}mask_scores''']
_lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
_lowercase : str = torch.sigmoid(_lowercase )
_lowercase : int = s * (r - l) + l
_lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
_lowercase : Union[str, Any] = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : List[Any] = os.path.join(
os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )
if not os.path.isdir(_lowercase ):
shutil.copytree(_lowercase, _lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
            '''For `sigmoied_threshold`, it is the threshold \\tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
        help='''Folder where the pruned ("bertarized") model will be saved''',
)
_A : List[Any] =parser.parse_args()
main(args)
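# Every branch above boils down to the same move: derive a {0, 1} mask from a
# weight or score tensor, then multiply it into the weight. A hand-rolled
# sketch of the magnitude and top-K masks (an approximation of what the
# emmental binarizers compute, not the library classes themselves):
import torch
def magnitude_mask(weight, threshold):
    # Keep weights whose absolute value clears the threshold.
    return (weight.abs() > threshold).float()
def topk_mask(scores, keep_ratio):
    # Keep the `keep_ratio` fraction of entries with the highest scores.
    k = max(1, int(keep_ratio * scores.numel()))
    cutoff = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= cutoff).float()
weights = torch.randn(4, 4)
pruned = weights * magnitude_mask(weights, threshold=0.5)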
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A : Dict ={
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] =['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[str] =['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_A : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
_lowercase : Tuple = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowercase : Union[str, Any] = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(_lowercase ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
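# Round-trip sanity check of the same scheme against the standard library
# (the mangled function names above presumably correspond to base64_encode
# and base64_decode in the original source):
import base64
payload = b'Hello, Base64!'
encoded = base64.b64encode(payload)  # b'SGVsbG8sIEJhc2U2NCE='
assert base64.b64decode(encoded) == payload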
'''simple docstring'''
class lowerCamelCase__ : # Public class to implement a graph
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : list[list[bool]] ) -> None:
'''simple docstring'''
_lowercase : Union[str, Any] = row
_lowercase : int = col
_lowercase : Tuple = graph
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : list[list[bool]] ) -> bool:
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def __UpperCAmelCase ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : list[list[bool]] ) -> None:
'''simple docstring'''
_lowercase : Any = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
_lowercase : Union[str, Any] = [-1, 0, 1, -1, 1, -1, 0, 1]
_lowercase : Union[str, Any] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ) -> int: # And finally, count all islands.
'''simple docstring'''
_lowercase : str = [[False for j in range(self.COL )] for i in range(self.ROW )]
_lowercase : List[Any] = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
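# The mangling above breaks the attribute and parameter names, so here is a
# self-contained reconstruction of the same 8-direction DFS island count,
# with a small usage check:
def count_islands(grid):
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]
    def dfs(i, j):
        visited[i][j] = True
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                ni, nj = i + di, j + dj
                if 0 <= ni < rows and 0 <= nj < cols and not visited[ni][nj] and grid[ni][nj]:
                    dfs(ni, nj)
    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:
                dfs(i, j)
                count += 1
    return count
assert count_islands([[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) == 2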
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
return str(_lowercase ) == str(_lowercase )[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
return int(_lowercase ) + int(str(_lowercase )[::-1] )
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
_lowercase : List[str] = []
for num in range(1, _lowercase ):
_lowercase : Tuple = 0
_lowercase : Tuple = num
while iterations < 50:
_lowercase : Union[str, Any] = sum_reverse(_lowercase )
iterations += 1
if is_palindrome(_lowercase ):
break
else:
lychrel_nums.append(_lowercase )
return len(_lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
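# One reverse-and-add step by hand: 47 is not a Lychrel candidate because a
# single iteration already produces a palindrome, whereas 196 is the classic
# number conjectured never to converge.
n = 47
step = n + int(str(n)[::-1])  # 47 + 74
assert step == 121 and str(step) == str(step)[::-1]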
'''simple docstring'''
import numpy as np
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase = 1E-1_2, _lowercase = 100, ) -> tuple[float, np.ndarray]:
assert np.shape(_lowercase )[0] == np.shape(_lowercase )[1]
# Ensure proper dimensionality.
assert np.shape(_lowercase )[0] == np.shape(_lowercase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(_lowercase ) == np.iscomplexobj(_lowercase )
_lowercase : Optional[int] = np.iscomplexobj(_lowercase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(_lowercase, input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_lowercase : Optional[Any] = False
_lowercase : Any = 0
_lowercase : List[Any] = 0
_lowercase : List[Any] = 1E1_2
while not convergence:
# Multiple matrix by the vector.
_lowercase : Optional[Any] = np.dot(_lowercase, _lowercase )
# Normalize the resulting output vector.
_lowercase : List[Any] = w / np.linalg.norm(_lowercase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_lowercase : Optional[int] = vector.conj().T if is_complex else vector.T
_lowercase : Optional[Any] = np.dot(_lowercase, np.dot(_lowercase, _lowercase ) )
# Check convergence.
_lowercase : Tuple = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowercase : Optional[int] = True
_lowercase : str = lambda_
if is_complex:
_lowercase : Any = np.real(lambda_ )
return lambda_, vector
def __UpperCamelCase ( ) -> None:
_lowercase : Optional[int] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowercase : Optional[int] = np.array([41, 4, 20] )
    _lowercase : Any = real_input_matrix.astype(np.complex128 )
_lowercase : str = np.triu(1j * complex_input_matrix, 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
    _lowercase : List[Any] = np.array([41, 4, 20] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowercase : Any = real_input_matrix
_lowercase : List[Any] = real_vector
elif problem_type == "complex":
_lowercase : Union[str, Any] = complex_input_matrix
_lowercase : Optional[Any] = complex_vector
# Our implementation.
_lowercase , _lowercase : Union[str, Any] = power_iteration(_lowercase, _lowercase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_lowercase , _lowercase : List[Any] = np.linalg.eigh(_lowercase )
# Last eigenvalue is the maximum one.
_lowercase : Any = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowercase : str = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(_lowercase ) - np.abs(_lowercase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
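# The same idea on a 2x2 symmetric matrix, stripped to its core: repeated
# multiplication plus normalization drives the vector toward the dominant
# eigenvector, and the Rayleigh quotient toward the dominant eigenvalue.
import numpy as np
a = np.array([[2.0, 0.0], [0.0, 1.0]])
v = np.array([1.0, 1.0])
for _ in range(50):
    v = a @ v
    v /= np.linalg.norm(v)
rayleigh = v @ a @ v  # converges to the dominant eigenvalue, 2.0
assert abs(rayleigh - 2.0) < 1e-6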
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
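# The --correct_filename input read above is semicolon-separated with four
# fields per line: file, class name, test name, and the corrected source line.
# A sketch of one such line (the concrete values are hypothetical):
sample = 'tests/test_foo.py;FooModelTest;test_forward;self.assertEqual(out.shape, (1, 8))'
file_, class_name, test_name, correct_line = sample.split(';')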
'''simple docstring'''
def __UpperCamelCase ( _lowercase, _lowercase ) -> float:
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowercase ) * abs(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
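# Worked example of the formula above: a 10 kg mass moving at 5 m/s carries
# 0.5 * 10 * |5| * |5| = 125 J of kinetic energy.
assert 0.5 * 10 * abs(5) * abs(5) == 125.0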
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda UpperCamelCase_ : -UpperCamelCase_[0] )
]
return result
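# A typical invocation through the high-level pipeline API, assuming a CLIP
# checkpoint (requires network access; the image URL is one used in the
# transformers test suite):
from transformers import pipeline
classifier = pipeline(task='zero-shot-image-classification', model='openai/clip-vit-base-patch32')
predictions = classifier(
    'https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png',
    candidate_labels=['animals', 'furniture'],
)
print(predictions)  # e.g. [{'score': 0.98..., 'label': 'animals'}, ...]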
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_A : int =pytest.mark.integration
@pytest.mark.parametrize('path', ['paws', 'csv'] )
def __UpperCamelCase ( _lowercase, _lowercase ) -> Dict:
inspect_dataset(_lowercase, _lowercase )
_lowercase : Any = path + '.py'
assert script_name in os.listdir(_lowercase )
assert "__pycache__" not in os.listdir(_lowercase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path', ['accuracy'] )
def __UpperCamelCase ( _lowercase, _lowercase ) -> Tuple:
inspect_metric(_lowercase, _lowercase )
_lowercase : List[Any] = path + '.py'
assert script_name in os.listdir(_lowercase )
assert "__pycache__" not in os.listdir(_lowercase )
@pytest.mark.parametrize(
'path, config_name, expected_splits', [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
], )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Dict:
_lowercase : Any = get_dataset_config_info(_lowercase, config_name=_lowercase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception', [
('paws', None, ValueError),
], )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> List[str]:
with pytest.raises(_lowercase ):
get_dataset_config_info(_lowercase, config_name=_lowercase )
@pytest.mark.parametrize(
'path, expected', [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
], )
def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : Tuple = get_dataset_config_names(_lowercase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config', [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
], )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Optional[int]:
_lowercase : List[str] = get_dataset_infos(_lowercase )
assert list(infos.keys() ) == expected_configs
_lowercase : Tuple = expected_configs[0]
assert expected_config in infos
_lowercase : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits', [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
], )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Any:
_lowercase : Any = get_dataset_infos(_lowercase )
assert expected_config in infos
_lowercase : Optional[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception', [
('paws', None, ValueError),
], )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Optional[int]:
with pytest.raises(_lowercase ):
get_dataset_split_names(_lowercase, config_name=_lowercase )
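# The same helpers outside pytest (network access required); the expected
# values below are taken from the parametrized cases above:
from datasets import get_dataset_config_names, get_dataset_split_names
print(get_dataset_config_names('squad'))  # ['plain_text']
print(get_dataset_split_names('squad', 'plain_text'))  # ['train', 'validation']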
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
            my_fir_sum += prob * math.log2(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
        for chb in my_alphas:
            _lowercase : Optional[Any] = cha + chb
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
                my_sec_sum += prob * math.log2(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
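# The first-order term above is plain Shannon entropy, H = -sum(p * log2(p)).
# A tiny standalone computation for a toy string:
import math
from collections import Counter
toy = 'aab'
counts = Counter(toy)
total = sum(counts.values())
entropy = -sum((c / total) * math.log2(c / total) for c in counts.values())
print(f'{entropy:.3f}')  # 0.918 bits per character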
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A : Optional[int] ={'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] =['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_lowercase : List[Any] = 'The dog is cute and lives in the garden house'
_lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] )
_lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_lowercase : Tuple = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state']
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : List[str] =logging.get_logger(__name__)
_A : int ={
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """luke"""
def __init__( self : List[Any] , UpperCamelCase_ : int=5_0267 , UpperCamelCase_ : List[Any]=50_0000 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Any=256 , UpperCamelCase_ : Optional[int]=12 , UpperCamelCase_ : Optional[int]=12 , UpperCamelCase_ : Any=3072 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Tuple=1E-12 , UpperCamelCase_ : Any=True , UpperCamelCase_ : Any=None , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Union[str, Any]=0 , UpperCamelCase_ : Optional[Any]=2 , **UpperCamelCase_ : Tuple , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = entity_vocab_size
_lowercase : List[str] = hidden_size
_lowercase : Dict = entity_emb_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[str] = max_position_embeddings
_lowercase : Optional[Any] = type_vocab_size
_lowercase : Dict = initializer_range
_lowercase : Union[str, Any] = layer_norm_eps
_lowercase : Optional[int] = use_entity_aware_attention
_lowercase : Optional[int] = classifier_dropout
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_vision_model"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Tuple = patch_size
_lowercase : Dict = image_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = attention_dropout
_lowercase : int = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : str = qkv_bias
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_qformer"""
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : List[str] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : int = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Optional[int] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
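# Composing the composite config from its parts through the classmethod above,
# assuming the de-obfuscated transformers names (the identifiers in this file
# are mangled; in the library the classmethod is from_vision_qformer_text_configs):
from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)
composite = InstructBlipConfig.from_vision_qformer_text_configs(
    vision_config=InstructBlipVisionConfig(),
    qformer_config=InstructBlipQFormerConfig(),
    text_config=OPTConfig(),
)
assert composite.num_query_tokens == 32  # the default in the signature above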
'''simple docstring'''
from collections.abc import Sequence
def __UpperCamelCase ( _lowercase, _lowercase ) -> float:
return sum(c * (x**i) for i, c in enumerate(_lowercase ) )
def __UpperCamelCase ( _lowercase, _lowercase ) -> float:
_lowercase : List[str] = 0.0
for coeff in reversed(_lowercase ):
_lowercase : Dict = result * x + coeff
return result
if __name__ == "__main__":
_A : Optional[int] =(0.0, 0.0, 5.0, 9.3, 7.0)
_A : Tuple =10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
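# Worked check for the sample inputs above: with coefficients
# (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0, both routines compute
# 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.0, but Horner's scheme needs only one
# multiplication and one addition per coefficient instead of a power per term.
assert 5 * 10**2 + 9.3 * 10**3 + 7 * 10**4 == 79800.0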
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        '''simple docstring'''
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [F'''<extra_id_{i}>''' for i in range(125)]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])), )
    def test_decode_single_bytes(self):
        '''simple docstring'''
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == '')
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
    def test_convert_tokens_to_string_format(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                attributes_list = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False)
                for attr in attributes_list:
                    setattr(tokenizer, attr + '_id', None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), None)
                    setattr(tokenizer, attr + '_id', token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), token_id_to_test_setters)
                setattr(tokenizer, 'additional_special_tokens_ids', [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [])
                setattr(tokenizer, 'additional_special_tokens_ids', [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [token_id_to_test_setters])
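# --- Added example (not part of the original test file): a minimal, hedged
# sketch of the save/reload round-trip the tests above exercise. The
# checkpoint name "t5-small" is only illustrative.
import tempfile
from transformers import AutoTokenizer

def _demo_special_token_roundtrip():
    tok = AutoTokenizer.from_pretrained('t5-small')
    tok.add_special_tokens({'additional_special_tokens': ['<new_tok>']})
    with tempfile.TemporaryDirectory() as tmp:
        tok.save_pretrained(tmp)
        reloaded = tok.__class__.from_pretrained(tmp)
    assert '<new_tok>' in reloaded.additional_special_tokens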
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Tuple =logging.get_logger(__name__)
_A : str ={
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''vit_mae'''
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
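# --- Added example: how this config is typically consumed. ViTMAEConfig and
# ViTMAEModel are real transformers classes; the values below are illustrative.
from transformers import ViTMAEConfig, ViTMAEModel

_demo_config = ViTMAEConfig(mask_ratio=0.6, norm_pix_loss=True)
_demo_model = ViTMAEModel(_demo_config)  # randomly initialized, nothing downloaded
print(_demo_config.mask_ratio, _demo_config.decoder_hidden_size)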
| 4 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
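# --- Added example: a guess at how the doc-building pipeline applies this
# mapping; the template string below is made up for illustration only.
_doc_template = 'Use {processor_class} to prepare inputs for {model_class}.'
for _placeholder, _value in {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
}.items():
    _doc_template = _doc_template.replace(_placeholder, _value)
print(_doc_template)  # Use FakeProcessorClass to prepare inputs for FakeModelClass.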
| 4 | 1 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    '''simple docstring'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        '''simple docstring'''
        requires_backends(self, 'timm')
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError('backbone is not set in the config. Please set it to a timm model name.')
        if config.backbone not in timm.list_models():
            raise ValueError(F'''backbone {config.backbone} is not supported by timm.''')
        if hasattr(config, 'out_features') and config.out_features is not None:
            raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.')
        pretrained = getattr(config, 'use_pretrained_backbone', None)
        if pretrained is None:
            raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.')
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, 'out_indices', None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['module']: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
@classmethod
    def _from_config(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ['vision', 'timm'])
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop('config', TimmBackboneConfig())
        use_timm = kwargs.pop('use_timm_backbone', True)
        if not use_timm:
            raise ValueError('use_timm_backbone must be True for timm backbones')
        num_channels = kwargs.pop('num_channels', config.num_channels)
        features_only = kwargs.pop('features_only', config.features_only)
        use_pretrained_backbone = kwargs.pop('use_pretrained_backbone', config.use_pretrained_backbone)
        out_indices = kwargs.pop('out_indices', config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs)
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('Cannot output attentions for timm backbones at the moment')
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
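# --- Added example: the underlying timm API this wrapper builds on.
# `features_only=True` returns intermediate feature maps and `out_indices`
# selects the stages; "resnet18" is just an illustrative backbone choice.
import timm
import torch

_demo_backbone = timm.create_model('resnet18', pretrained=False, features_only=True, out_indices=(1, 2, 3))
_demo_feats = _demo_backbone(torch.randn(1, 3, 224, 224))
print([f.shape for f in _demo_feats])  # one tensor per requested stage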
| 4 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
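# --- Added check (not in the original file): entropy(x) above equals the
# Shannon entropy of softmax(x), since with p_i = exp(x_i) / A we have
# -sum(p_i * log p_i) = log A - B / A.
_peaked = torch.tensor([[8.0, 0.0, 0.0]])  # confident logits -> entropy near 0
_flat = torch.tensor([[1.0, 1.0, 1.0]])    # uniform logits -> entropy = log(3)
print(entropy(_peaked), entropy(_flat))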
class DeeBertEncoder(nn.Module):
    '''simple docstring'''
    def __init__(self, config):
        '''simple docstring'''
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        '''simple docstring'''
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        '''simple docstring'''
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        '''simple docstring'''
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
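# --- Added sketch (standalone, not the classes above): the same
# exception-driven early-exit control flow in its smallest form. Names are
# hypothetical; like DeeBERT, this assumes batch size 1 at inference so the
# entropy comparison yields a single boolean.
class _EarlyExit(Exception):
    def __init__(self, output, exit_layer):
        self.output = output
        self.exit_layer = exit_layer  # counted from 1, like HighwayException

def _run_with_early_exit(hidden, layers, exits, thresholds):
    logits = None
    try:
        for i, (layer, exit_head) in enumerate(zip(layers, exits)):
            hidden = layer(hidden)
            logits = exit_head(hidden)
            if entropy(logits) < thresholds[i]:  # confident enough -> stop now
                raise _EarlyExit(logits, i + 1)
    except _EarlyExit as e:
        return e.output, e.exit_layer
    return logits, len(layers)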
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """, BERT_START_DOCSTRING, )
class DeeBertModel(BertPreTrainedModel):
    '''simple docstring'''
    def __init__(self, config):
        '''simple docstring'''
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()
    def init_highway_pooler(self):
        '''simple docstring'''
        self.encoder.init_highway_pooler(self.pooler)
    def get_input_embeddings(self):
        '''simple docstring'''
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        '''simple docstring'''
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        '''simple docstring'''
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        '''simple docstring'''
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    '''simple docstring'''
    def __init__(self, message, exit_layer):
        '''simple docstring'''
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    '''simple docstring'''
    def __init__(self, config):
        '''simple docstring'''
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
    def forward(self, encoder_outputs):
        '''simple docstring'''
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """, BERT_START_DOCSTRING, )
class DeeBertForSequenceClassification(BertPreTrainedModel):
    '''simple docstring'''
    def __init__(self, config):
        '''simple docstring'''
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
        '''simple docstring'''
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
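# --- Added illustration (hypothetical tensors): the highway-loss aggregation
# used above gives every intermediate exit its own loss and drops the final
# exit when training the highways.
import torch
from torch.nn import CrossEntropyLoss

_labels = torch.tensor([1, 0])
_exit_logits = [torch.randn(2, 3) for _ in range(12)]  # one exit per layer
_loss_fct = CrossEntropyLoss()
_highway_losses = [_loss_fct(l, _labels) for l in _exit_logits]
_train_highway_loss = sum(_highway_losses[:-1])  # exclude the final highway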
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        '''simple docstring'''
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model({'pixel_values': pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        '''simple docstring'''
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
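# --- Added example: a vectorized equivalent (an assumption, not part of the
# test suite) of the coordinate-swapping loop in prepare_config_and_inputs
# that makes every box satisfy x0 <= x1 and y0 <= y1.
import numpy as np

_bbox = np.random.randint(0, 1000, size=(2, 4, 4))
_bbox[..., [0, 2]] = np.sort(_bbox[..., [0, 2]], axis=-1)  # x coordinates
_bbox[..., [1, 3]] = np.sort(_bbox[..., [1, 3]], axis=-1)  # y coordinates
assert (_bbox[..., 2] >= _bbox[..., 0]).all() and (_bbox[..., 3] >= _bbox[..., 1]).all()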
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        '''simple docstring'''
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict['labels'] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict['start_positions'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict['end_positions'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict['labels'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict['labels'] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, 'hf_compute_loss', None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['labels'].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['labels'] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: 'input_ids'}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    def test_for_token_classification(self):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)
    def test_for_question_answering(self):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4))
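# --- Added note: integration tests like the one above compare a small output
# slice against hard-coded values with an absolute tolerance; a stand-alone
# version of that check looks like this (the numbers are stand-ins).
import numpy as np

_expected = np.array([[-0.0529, 0.3618, 0.1632]])
_actual = _expected + 5e-5  # stand-in for a freshly computed output slice
assert np.allclose(_actual, _expected, atol=1E-4)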
| 4 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def test_sorted(self):
        '''simple docstring'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)
    def test_negative_max_weight(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')
    def test_negative_weight_value(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')
    def test_negative_profit_value(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')
    def test_null_max_weight(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')
    def test_unequal_list_length(self):
        '''simple docstring'''
        self.assertRaisesRegex(
            ValueError, 'The length of profit and weight must be same.')
if __name__ == "__main__":
unittest.main()
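# --- Added sketch (an assumption about `kp.calc_profit`, not its actual
# source): a fractional greedy knapsack consistent with the expected value in
# test_sorted. All items share profit/weight ratio 5 and their total weight is
# 42 <= 100, so everything is taken: 10+20+30+40+50+60 = 210.
def _greedy_calc_profit(profit, weight, max_weight):
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.')
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.')
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total = 0.0
    for p, w in items:
        if max_weight == 0:
            break
        take = min(w, max_weight)  # take the whole item, or the fraction that fits
        total += p * take / w
        max_weight -= take
    return total

print(_greedy_calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100))  # 210.0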
| 4 | 1 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_A : List[str] =2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
_A : List[str] ={
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
_A : str ={}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_A : Optional[Any] ='''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
_A : Optional[Any] ='''allenai'''
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'''{k}</w>''']
        da[k] = d[k]  # restore
    return da
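# --- Added worked example for rewrite_dict_keys: '@@' continuation markers are
# stripped, word-final tokens gain '</w>', and the four special tokens are
# restored verbatim. (The input dict below is illustrative.)
_demo = {'le@@': 5, 'tt@@': 6, 'er': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
print(rewrite_dict_keys(_demo))
# {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}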
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'''Writing results to {pytorch_dump_folder_path}''')
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
_lowercase : str = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
_lowercase : Any = cls.hub_models()
_lowercase : List[Any] = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
_lowercase : Union[str, Any] = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
_lowercase : int = hub_utils.from_pretrained(
_lowercase, _lowercase, _lowercase, archive_map=_lowercase, **_lowercase )
_lowercase : Dict = vars(chkpt['args']['model'] )
_lowercase : Dict = args['source_lang']
_lowercase : List[Any] = args['target_lang']
_lowercase : str = dirname(_lowercase )
_lowercase : Optional[int] = basename(_lowercase )
# dicts
_lowercase : List[Any] = os.path.join(_lowercase, f'''dict.{src_lang}.txt''' )
_lowercase : List[str] = os.path.join(_lowercase, f'''dict.{tgt_lang}.txt''' )
_lowercase : Dict = Dictionary.load(_lowercase )
_lowercase : Optional[int] = rewrite_dict_keys(src_dict.indices )
_lowercase : Optional[int] = len(_lowercase )
_lowercase : int = os.path.join(_lowercase, 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(_lowercase, ensure_ascii=_lowercase, indent=_lowercase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
_lowercase : Dict = True
for k in src_vocab.keys():
if not k.islower():
_lowercase : Dict = False
break
_lowercase : Optional[int] = Dictionary.load(_lowercase )
_lowercase : List[str] = rewrite_dict_keys(tgt_dict.indices )
_lowercase : str = len(_lowercase )
_lowercase : Optional[int] = os.path.join(_lowercase, 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(_lowercase, ensure_ascii=_lowercase, indent=_lowercase ) )
# merges_file (bpecodes)
_lowercase : Tuple = os.path.join(_lowercase, VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
_lowercase : int = os.path.join(_lowercase, _lowercase )
if os.path.exists(_lowercase ):
break
with open(_lowercase, encoding='utf-8' ) as fin:
_lowercase : Any = fin.read()
_lowercase : Tuple = re.sub(r' \d+$', '', _lowercase, 0, re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowercase, 'w', encoding='utf-8' ) as fout:
fout.write(_lowercase )
# model config
_lowercase : Tuple = os.path.join(_lowercase, 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
_lowercase : Optional[Any] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.0_2,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
_lowercase : Any = 5
_lowercase : Optional[Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
_lowercase : List[Any] = best_score_hparams[model_dir]['length_penalty']
else:
_lowercase : Dict = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(_lowercase, ensure_ascii=_lowercase, indent=_lowercase ) )
# tokenizer config
_lowercase : Optional[Any] = os.path.join(_lowercase, _lowercase )
_lowercase : Union[str, Any] = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(_lowercase, ensure_ascii=_lowercase, indent=_lowercase ) )
# model
_lowercase : Optional[Any] = chkpt['models'][0]
_lowercase : List[Any] = model.state_dict()
# rename keys to start with 'model.'
_lowercase : Optional[int] = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
_lowercase : Optional[int] = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowercase, _lowercase )
_lowercase : Dict = FSMTConfig.from_pretrained(_lowercase )
_lowercase : Optional[Any] = FSMTForConditionalGeneration(_lowercase )
# check that it loads ok
model_new.load_state_dict(_lowercase, strict=_lowercase )
# save
_lowercase : Any = os.path.join(_lowercase, _lowercase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowercase, _lowercase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_A : List[str] =parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
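# --- Added illustration (made-up keys): the state-dict renaming/pruning step
# used by the conversion above, in isolation.
from collections import OrderedDict

_sd = {'encoder.w': 1, 'decoder.v': 2, 'encoder.version': 3}
_sd = OrderedDict(('model.' + k, v) for k, v in _sd.items())  # prefix every key
for _k in ['model.encoder.version']:
    _sd.pop(_k, None)  # drop keys the HF model does not expect
print(list(_sd))  # ['model.encoder.w', 'model.decoder.v']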
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet'''] = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet_fast'''] = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlnet'''] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlnet'''] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
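# --- Added example: how downstream code typically probes the same
# optional-dependency guard; both helpers are real transformers.utils exports.
from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    print('torch objects skipped; only dummy objects are exported')
else:
    print('torch objects registered in _import_structure')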
| 4 | 1 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 200_0000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
    print(F'''{solution() = }''')
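# --- Added sanity check: the primes below 10 are 2, 3, 5 and 7, so
# solution(10) should print 17 (the Project Euler #10 sample case).
print(solution(10))  # 17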
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class MarkupLMConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''markuplm'''
    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
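# --- Added example: instantiating the config directly; MarkupLMConfig is a
# real transformers class and the overridden values below are illustrative.
from transformers import MarkupLMConfig

_demo_config = MarkupLMConfig(max_depth=40, xpath_unit_hidden_size=32)
print(_demo_config.max_xpath_tag_unit_embeddings, _demo_config.tag_pad_id)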
| 4 | 1 |